title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
DEPR: replace without passing value
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 15e85d0f90c5e..4debd41de213f 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -207,6 +207,7 @@ Removal of prior version deprecations/changes - :meth:`SeriesGroupBy.agg` no longer pins the name of the group to the input passed to the provided ``func`` (:issue:`51703`) - All arguments except ``name`` in :meth:`Index.rename` are now keyword only (:issue:`56493`) - All arguments except the first ``path``-like argument in IO writers are now keyword only (:issue:`54229`) +- Disallow calling :meth:`Series.replace` or :meth:`DataFrame.replace` without a ``value`` and with non-dict-like ``to_replace`` (:issue:`33302`) - Disallow non-standard (``np.ndarray``, :class:`Index`, :class:`ExtensionArray`, or :class:`Series`) to :func:`isin`, :func:`unique`, :func:`factorize` (:issue:`52986`) - Disallow passing a pandas type to :meth:`Index.view` (:issue:`55709`) - Disallow units other than "s", "ms", "us", "ns" for datetime64 and timedelta64 dtypes in :func:`array` (:issue:`53817`) diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 930ee83aea00b..123dc679a83ea 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -296,13 +296,6 @@ def __getitem__( result = self._from_backing_data(result) return result - def _fill_mask_inplace( - self, method: str, limit: int | None, mask: npt.NDArray[np.bool_] - ) -> None: - # (for now) when self.ndim == 2, we assume axis=0 - func = missing.get_fill_func(method, ndim=self.ndim) - func(self._ndarray.T, limit=limit, mask=mask.T) - def _pad_or_backfill( self, *, diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index fdc839225a557..1855bd1368251 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -2111,25 +2111,6 @@ def _where(self, mask: npt.NDArray[np.bool_], value) -> Self: result[~mask] = val return result - # TODO(3.0): this can 
be removed once GH#33302 deprecation is enforced - def _fill_mask_inplace( - self, method: str, limit: int | None, mask: npt.NDArray[np.bool_] - ) -> None: - """ - Replace values in locations specified by 'mask' using pad or backfill. - - See also - -------- - ExtensionArray.fillna - """ - func = missing.get_fill_func(method) - npvalues = self.astype(object) - # NB: if we don't copy mask here, it may be altered inplace, which - # would mess up the `self[mask] = ...` below. - func(npvalues, limit=limit, mask=mask.copy()) - new_values = self._from_sequence(npvalues, dtype=self.dtype) - self[mask] = new_values[mask] - def _rank( self, *, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ea618ea088348..0243d7f6fc573 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7319,17 +7319,8 @@ def replace( inplace: bool = False, regex: bool = False, ) -> Self | None: - if value is lib.no_default and not is_dict_like(to_replace) and regex is False: - # case that goes through _replace_single and defaults to method="pad" - warnings.warn( - # GH#33302 - f"{type(self).__name__}.replace without 'value' and with " - "non-dict-like 'to_replace' is deprecated " - "and will raise in a future version. " - "Explicitly specify the new values instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) + if not is_bool(regex) and to_replace is not None: + raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") if not ( is_scalar(to_replace) @@ -7342,6 +7333,15 @@ def replace( f"{type(to_replace).__name__!r}" ) + if value is lib.no_default and not ( + is_dict_like(to_replace) or is_dict_like(regex) + ): + raise ValueError( + # GH#33302 + f"{type(self).__name__}.replace must specify either 'value', " + "a dict-like 'to_replace', or dict-like 'regex'." 
+ ) + inplace = validate_bool_kwarg(inplace, "inplace") if inplace: if not PYPY: @@ -7352,41 +7352,10 @@ def replace( stacklevel=2, ) - if not is_bool(regex) and to_replace is not None: - raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") - if value is lib.no_default: - # GH#36984 if the user explicitly passes value=None we want to - # respect that. We have the corner case where the user explicitly - # passes value=None *and* a method, which we interpret as meaning - # they want the (documented) default behavior. - - # passing a single value that is scalar like - # when value is None (GH5319), for compat - if not is_dict_like(to_replace) and not is_dict_like(regex): - to_replace = [to_replace] - - if isinstance(to_replace, (tuple, list)): - # TODO: Consider copy-on-write for non-replaced columns's here - if isinstance(self, ABCDataFrame): - from pandas import Series - - result = self.apply( - Series._replace_single, - args=(to_replace, inplace), - ) - if inplace: - return None - return result - return self._replace_single(to_replace, inplace) - if not is_dict_like(to_replace): - if not is_dict_like(regex): - raise TypeError( - 'If "to_replace" and "value" are both None ' - 'and "to_replace" is not a list, then ' - "regex must be a mapping" - ) + # In this case we have checked above that + # 1) regex is dict-like and 2) to_replace is None to_replace = regex regex = True diff --git a/pandas/core/series.py b/pandas/core/series.py index 843788273a6ef..8b65ce679ab6b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -97,7 +97,6 @@ algorithms, base, common as com, - missing, nanops, ops, roperator, @@ -5116,40 +5115,6 @@ def info( show_counts=show_counts, ) - @overload - def _replace_single(self, to_replace, inplace: Literal[False]) -> Self: ... - - @overload - def _replace_single(self, to_replace, inplace: Literal[True]) -> None: ... - - @overload - def _replace_single(self, to_replace, inplace: bool) -> Self | None: ... 
- - # TODO(3.0): this can be removed once GH#33302 deprecation is enforced - def _replace_single(self, to_replace, inplace: bool) -> Self | None: - """ - Replaces values in a Series using the fill method specified when no - replacement value is given in the replace method - """ - limit = None - method = "pad" - - result = self if inplace else self.copy() - - values = result._values - mask = missing.mask_missing(values, to_replace) - - if isinstance(values, ExtensionArray): - # dispatch to the EA's _pad_mask_inplace method - values._fill_mask_inplace(method, limit, mask) - else: - fill_f = missing.get_fill_func(method) - fill_f(values, limit=limit, mask=mask) - - if inplace: - return None - return result - def memory_usage(self, index: bool = True, deep: bool = False) -> int: """ Return the memory usage of the Series. diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 2d8517693a2f8..38a443b56ee3d 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -608,24 +608,7 @@ 4 None dtype: object - When ``value`` is not explicitly passed and `to_replace` is a scalar, list - or tuple, `replace` uses the method parameter (default 'pad') to do the - replacement. So this is why the 'a' values are being replaced by 10 - in rows 1 and 2 and 'b' in row 4 in this case. - - >>> s.replace('a') - 0 10 - 1 10 - 2 10 - 3 b - 4 b - dtype: object - - .. deprecated:: 2.1.0 - The 'method' parameter and padding behavior are deprecated. 
- - On the other hand, if ``None`` is explicitly passed for ``value``, it will - be respected: + If ``None`` is explicitly passed for ``value``, it will be respected: >>> s.replace('a', None) 0 10 diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 3b9c342f35a71..fb7ba2b7af38a 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1264,13 +1264,8 @@ def test_replace_invalid_to_replace(self): r"Expecting 'to_replace' to be either a scalar, array-like, " r"dict or None, got invalid type.*" ) - msg2 = ( - "DataFrame.replace without 'value' and with non-dict-like " - "'to_replace' is deprecated" - ) with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=msg2): - df.replace(lambda x: x.strip()) + df.replace(lambda x: x.strip()) @pytest.mark.parametrize("dtype", ["float", "float64", "int64", "Int64", "boolean"]) @pytest.mark.parametrize("value", [np.nan, pd.NA]) diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index 09a3469e73462..0a79bcea679a7 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -137,20 +137,15 @@ def test_replace_gh5319(self): # API change from 0.12? 
# GH 5319 ser = pd.Series([0, np.nan, 2, 3, 4]) - expected = ser.ffill() msg = ( - "Series.replace without 'value' and with non-dict-like " - "'to_replace' is deprecated" + "Series.replace must specify either 'value', " + "a dict-like 'to_replace', or dict-like 'regex'" ) - with tm.assert_produces_warning(FutureWarning, match=msg): - result = ser.replace([np.nan]) - tm.assert_series_equal(result, expected) + with pytest.raises(ValueError, match=msg): + ser.replace([np.nan]) - ser = pd.Series([0, np.nan, 2, 3, 4]) - expected = ser.ffill() - with tm.assert_produces_warning(FutureWarning, match=msg): - result = ser.replace(np.nan) - tm.assert_series_equal(result, expected) + with pytest.raises(ValueError, match=msg): + ser.replace(np.nan) def test_replace_datetime64(self): # GH 5797 @@ -182,19 +177,16 @@ def test_replace_timedelta_td64(self): def test_replace_with_single_list(self): ser = pd.Series([0, 1, 2, 3, 4]) - msg2 = ( - "Series.replace without 'value' and with non-dict-like " - "'to_replace' is deprecated" + msg = ( + "Series.replace must specify either 'value', " + "a dict-like 'to_replace', or dict-like 'regex'" ) - with tm.assert_produces_warning(FutureWarning, match=msg2): - result = ser.replace([1, 2, 3]) - tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4])) + with pytest.raises(ValueError, match=msg): + ser.replace([1, 2, 3]) s = ser.copy() - with tm.assert_produces_warning(FutureWarning, match=msg2): - return_value = s.replace([1, 2, 3], inplace=True) - assert return_value is None - tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4])) + with pytest.raises(ValueError, match=msg): + s.replace([1, 2, 3], inplace=True) def test_replace_mixed_types(self): ser = pd.Series(np.arange(5), dtype="int64") @@ -483,13 +475,8 @@ def test_replace_invalid_to_replace(self): r"Expecting 'to_replace' to be either a scalar, array-like, " r"dict or None, got invalid type.*" ) - msg2 = ( - "Series.replace without 'value' and with non-dict-like " - "'to_replace' is 
deprecated" - ) with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=msg2): - series.replace(lambda x: x.strip()) + series.replace(lambda x: x.strip()) @pytest.mark.parametrize("frame", [False, True]) def test_replace_nonbool_regex(self, frame):
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58040
2024-03-28T02:03:54Z
2024-04-01T18:46:11Z
2024-04-01T18:46:11Z
2024-04-01T20:37:05Z
DEPR: replace method/limit keywords
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 2286a75f5d0c5..26dd6f83ad44a 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -207,6 +207,7 @@ Removal of prior version deprecations/changes - All arguments except ``name`` in :meth:`Index.rename` are now keyword only (:issue:`56493`) - All arguments except the first ``path``-like argument in IO writers are now keyword only (:issue:`54229`) - Removed "freq" keyword from :class:`PeriodArray` constructor, use "dtype" instead (:issue:`52462`) +- Removed deprecated "method" and "limit" keywords from :meth:`Series.replace` and :meth:`DataFrame.replace` (:issue:`53492`) - Removed the "closed" and "normalize" keywords in :meth:`DatetimeIndex.__new__` (:issue:`52628`) - Removed the "closed" and "unit" keywords in :meth:`TimedeltaIndex.__new__` (:issue:`52628`, :issue:`55499`) - All arguments in :meth:`Index.sort_values` are now keyword only (:issue:`56493`) diff --git a/pandas/conftest.py b/pandas/conftest.py index 65410c3c09494..34489bb70575a 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -150,7 +150,6 @@ def pytest_collection_modifyitems(items, config) -> None: ("is_categorical_dtype", "is_categorical_dtype is deprecated"), ("is_sparse", "is_sparse is deprecated"), ("DataFrameGroupBy.fillna", "DataFrameGroupBy.fillna is deprecated"), - ("NDFrame.replace", "The 'method' keyword"), ("NDFrame.replace", "Series.replace without 'value'"), ("NDFrame.clip", "Downcasting behavior in Series and DataFrame methods"), ("Series.idxmin", "The behavior of Series.idxmin"), diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a7545fb8d98de..1b9d7f4c81c9f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7285,9 +7285,7 @@ def replace( value=..., *, inplace: Literal[False] = ..., - limit: int | None = ..., regex: bool = ..., - method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> Self: ... 
@overload @@ -7297,9 +7295,7 @@ def replace( value=..., *, inplace: Literal[True], - limit: int | None = ..., regex: bool = ..., - method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> None: ... @overload @@ -7309,9 +7305,7 @@ def replace( value=..., *, inplace: bool = ..., - limit: int | None = ..., regex: bool = ..., - method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> Self | None: ... @final @@ -7326,32 +7320,9 @@ def replace( value=lib.no_default, *, inplace: bool = False, - limit: int | None = None, regex: bool = False, - method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default, ) -> Self | None: - if method is not lib.no_default: - warnings.warn( - # GH#33302 - f"The 'method' keyword in {type(self).__name__}.replace is " - "deprecated and will be removed in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) - elif limit is not None: - warnings.warn( - # GH#33302 - f"The 'limit' keyword in {type(self).__name__}.replace is " - "deprecated and will be removed in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) - if ( - value is lib.no_default - and method is lib.no_default - and not is_dict_like(to_replace) - and regex is False - ): + if value is lib.no_default and not is_dict_like(to_replace) and regex is False: # case that goes through _replace_single and defaults to method="pad" warnings.warn( # GH#33302 @@ -7387,14 +7358,11 @@ def replace( if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") - if value is lib.no_default or method is not lib.no_default: + if value is lib.no_default: # GH#36984 if the user explicitly passes value=None we want to # respect that. We have the corner case where the user explicitly # passes value=None *and* a method, which we interpret as meaning # they want the (documented) default behavior. 
- if method is lib.no_default: - # TODO: get this to show up as the default in the docs? - method = "pad" # passing a single value that is scalar like # when value is None (GH5319), for compat @@ -7408,12 +7376,12 @@ def replace( result = self.apply( Series._replace_single, - args=(to_replace, method, inplace, limit), + args=(to_replace, inplace), ) if inplace: return None return result - return self._replace_single(to_replace, method, inplace, limit) + return self._replace_single(to_replace, inplace) if not is_dict_like(to_replace): if not is_dict_like(regex): @@ -7458,9 +7426,7 @@ def replace( else: to_replace, value = keys, values - return self.replace( - to_replace, value, inplace=inplace, limit=limit, regex=regex - ) + return self.replace(to_replace, value, inplace=inplace, regex=regex) else: # need a non-zero len on all axes if not self.size: @@ -7524,9 +7490,7 @@ def replace( f"or a list or dict of strings or regular expressions, " f"you passed a {type(regex).__name__!r}" ) - return self.replace( - regex, value, inplace=inplace, limit=limit, regex=True - ) + return self.replace(regex, value, inplace=inplace, regex=True) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} diff --git a/pandas/core/series.py b/pandas/core/series.py index 0761dc17ab147..b0dc05fce7913 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5113,28 +5113,22 @@ def info( ) @overload - def _replace_single( - self, to_replace, method: str, inplace: Literal[False], limit - ) -> Self: ... + def _replace_single(self, to_replace, inplace: Literal[False]) -> Self: ... @overload - def _replace_single( - self, to_replace, method: str, inplace: Literal[True], limit - ) -> None: ... + def _replace_single(self, to_replace, inplace: Literal[True]) -> None: ... @overload - def _replace_single( - self, to_replace, method: str, inplace: bool, limit - ) -> Self | None: ... + def _replace_single(self, to_replace, inplace: bool) -> Self | None: ... 
# TODO(3.0): this can be removed once GH#33302 deprecation is enforced - def _replace_single( - self, to_replace, method: str, inplace: bool, limit - ) -> Self | None: + def _replace_single(self, to_replace, inplace: bool) -> Self | None: """ Replaces values in a Series using the fill method specified when no replacement value is given in the replace method """ + limit = None + method = "pad" result = self if inplace else self.copy() diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index a2b5439f9e12f..2d8517693a2f8 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -429,20 +429,11 @@ filled). Regular expressions, strings and lists or dicts of such objects are also allowed. {inplace} - limit : int, default None - Maximum size gap to forward or backward fill. - - .. deprecated:: 2.1.0 regex : bool or same types as `to_replace`, default False Whether to interpret `to_replace` and/or `value` as regular expressions. Alternatively, this could be a regular expression or a list, dict, or array of regular expressions in which case `to_replace` must be ``None``. - method : {{'pad', 'ffill', 'bfill'}} - The method to use when for replacement, when `to_replace` is a - scalar, list or tuple and `value` is ``None``. - - .. deprecated:: 2.1.0 Returns ------- @@ -538,14 +529,6 @@ 3 1 8 d 4 4 9 e - >>> s.replace([1, 2], method='bfill') - 0 3 - 1 3 - 2 3 - 3 4 - 4 5 - dtype: int64 - **dict-like `to_replace`** >>> df.replace({{0: 10, 1: 100}}) @@ -615,7 +598,7 @@ When one uses a dict as the `to_replace` value, it is like the value(s) in the dict are equal to the `value` parameter. 
``s.replace({{'a': None}})`` is equivalent to - ``s.replace(to_replace={{'a': None}}, value=None, method=None)``: + ``s.replace(to_replace={{'a': None}}, value=None)``: >>> s.replace({{'a': None}}) 0 10 diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index eb6d649c296fc..3b9c342f35a71 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1171,48 +1171,6 @@ def test_replace_with_empty_dictlike(self, mix_abc): tm.assert_frame_equal(df, df.replace({"b": {}})) tm.assert_frame_equal(df, df.replace(Series({"b": {}}))) - @pytest.mark.parametrize( - "to_replace, method, expected", - [ - (0, "bfill", {"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}), - ( - np.nan, - "bfill", - {"A": [0, 1, 2], "B": [5.0, 7.0, 7.0], "C": ["a", "b", "c"]}, - ), - ("d", "ffill", {"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}), - ( - [0, 2], - "bfill", - {"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, - ), - ( - [1, 2], - "pad", - {"A": [0, 0, 0], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, - ), - ( - (1, 2), - "bfill", - {"A": [0, 2, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, - ), - ( - ["b", "c"], - "ffill", - {"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "a", "a"]}, - ), - ], - ) - def test_replace_method(self, to_replace, method, expected): - # GH 19632 - df = DataFrame({"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}) - - msg = "The 'method' keyword in DataFrame.replace is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = df.replace(to_replace=to_replace, value=None, method=method) - expected = DataFrame(expected) - tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize( "replace_dict, final_data", [({"a": 1, "b": 1}, [[3, 3], [2, 2]]), ({"a": 1, "b": 2}, [[3, 1], [2, 3]])], diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index 
355953eac9d51..7d18ef28a722d 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -742,18 +742,6 @@ def test_equals_subclass(self): assert df1.equals(df2) assert df2.equals(df1) - def test_replace_list_method(self): - # https://github.com/pandas-dev/pandas/pull/46018 - df = tm.SubclassedDataFrame({"A": [0, 1, 2]}) - msg = "The 'method' keyword in SubclassedDataFrame.replace is deprecated" - with tm.assert_produces_warning( - FutureWarning, match=msg, raise_on_extra_warnings=False - ): - result = df.replace([1, 2], method="ffill") - expected = tm.SubclassedDataFrame({"A": [0, 0, 0]}) - assert isinstance(result, tm.SubclassedDataFrame) - tm.assert_frame_equal(result, expected) - class MySubclassWithMetadata(DataFrame): _metadata = ["my_metadata"] diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index c7b894e73d0dd..09a3469e73462 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -196,19 +196,6 @@ def test_replace_with_single_list(self): assert return_value is None tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4])) - # make sure things don't get corrupted when fillna call fails - s = ser.copy() - msg = ( - r"Invalid fill method\. Expecting pad \(ffill\) or backfill " - r"\(bfill\)\. 
Got crash_cymbal" - ) - msg3 = "The 'method' keyword in Series.replace is deprecated" - with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=msg3): - return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal") - assert return_value is None - tm.assert_series_equal(s, ser) - def test_replace_mixed_types(self): ser = pd.Series(np.arange(5), dtype="int64") @@ -550,62 +537,6 @@ def test_replace_extension_other(self, frame_or_series): # should not have changed dtype tm.assert_equal(obj, result) - def _check_replace_with_method(self, ser: pd.Series): - df = ser.to_frame() - - msg1 = "The 'method' keyword in Series.replace is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg1): - res = ser.replace(ser[1], method="pad") - expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype) - tm.assert_series_equal(res, expected) - - msg2 = "The 'method' keyword in DataFrame.replace is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg2): - res_df = df.replace(ser[1], method="pad") - tm.assert_frame_equal(res_df, expected.to_frame()) - - ser2 = ser.copy() - with tm.assert_produces_warning(FutureWarning, match=msg1): - res2 = ser2.replace(ser[1], method="pad", inplace=True) - assert res2 is None - tm.assert_series_equal(ser2, expected) - - with tm.assert_produces_warning(FutureWarning, match=msg2): - res_df2 = df.replace(ser[1], method="pad", inplace=True) - assert res_df2 is None - tm.assert_frame_equal(df, expected.to_frame()) - - def test_replace_ea_dtype_with_method(self, any_numeric_ea_dtype): - arr = pd.array([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype) - ser = pd.Series(arr) - - self._check_replace_with_method(ser) - - @pytest.mark.parametrize("as_categorical", [True, False]) - def test_replace_interval_with_method(self, as_categorical): - # in particular interval that can't hold NA - - idx = pd.IntervalIndex.from_breaks(range(4)) - ser = pd.Series(idx) - if 
as_categorical: - ser = ser.astype("category") - - self._check_replace_with_method(ser) - - @pytest.mark.parametrize("as_period", [True, False]) - @pytest.mark.parametrize("as_categorical", [True, False]) - def test_replace_datetimelike_with_method(self, as_period, as_categorical): - idx = pd.date_range("2016-01-01", periods=5, tz="US/Pacific") - if as_period: - idx = idx.tz_localize(None).to_period("D") - - ser = pd.Series(idx) - ser.iloc[-2] = pd.NaT - if as_categorical: - ser = ser.astype("category") - - self._check_replace_with_method(ser) - def test_replace_with_compiled_regex(self): # https://github.com/pandas-dev/pandas/issues/35680 s = pd.Series(["a", "b", "c"])
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58039
2024-03-27T23:03:44Z
2024-03-28T00:04:36Z
2024-03-28T00:04:36Z
2024-03-28T00:14:45Z
CLN: remove axis keyword from Block.pad_or_backill
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a7545fb8d98de..0a50ec2d4c5ce 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6727,12 +6727,10 @@ def _pad_or_backfill( axis = self._get_axis_number(axis) method = clean_fill_method(method) - if not self._mgr.is_single_block and axis == 1: - # e.g. test_align_fill_method - # TODO(3.0): once downcast is removed, we can do the .T - # in all axis=1 cases, and remove axis kward from mgr.pad_or_backfill. - if inplace: + if axis == 1: + if not self._mgr.is_single_block and inplace: raise NotImplementedError() + # e.g. test_align_fill_method result = self.T._pad_or_backfill( method=method, limit=limit, limit_area=limit_area ).T @@ -6741,7 +6739,6 @@ def _pad_or_backfill( new_mgr = self._mgr.pad_or_backfill( method=method, - axis=self._get_block_manager_axis(axis), limit=limit, limit_area=limit_area, inplace=inplace, diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index a7cdc7c39754d..468ec32ce7760 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1343,7 +1343,6 @@ def pad_or_backfill( self, *, method: FillnaOptions, - axis: AxisInt = 0, inplace: bool = False, limit: int | None = None, limit_area: Literal["inside", "outside"] | None = None, @@ -1357,16 +1356,12 @@ def pad_or_backfill( # Dispatch to the NumpyExtensionArray method. 
# We know self.array_values is a NumpyExtensionArray bc EABlock overrides vals = cast(NumpyExtensionArray, self.array_values) - if axis == 1: - vals = vals.T - new_values = vals._pad_or_backfill( + new_values = vals.T._pad_or_backfill( method=method, limit=limit, limit_area=limit_area, copy=copy, - ) - if axis == 1: - new_values = new_values.T + ).T data = extract_array(new_values, extract_numpy=True) return [self.make_block_same_class(data, refs=refs)] @@ -1814,7 +1809,6 @@ def pad_or_backfill( self, *, method: FillnaOptions, - axis: AxisInt = 0, inplace: bool = False, limit: int | None = None, limit_area: Literal["inside", "outside"] | None = None, @@ -1827,11 +1821,11 @@ def pad_or_backfill( elif limit_area is not None: raise NotImplementedError( f"{type(values).__name__} does not implement limit_area " - "(added in pandas 2.2). 3rd-party ExtnsionArray authors " + "(added in pandas 2.2). 3rd-party ExtensionArray authors " "need to add this argument to _pad_or_backfill." ) - if values.ndim == 2 and axis == 1: + if values.ndim == 2: # NDArrayBackedExtensionArray.fillna assumes axis=0 new_values = values.T._pad_or_backfill(**kwargs).T else:
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58038
2024-03-27T22:10:48Z
2024-03-28T00:06:19Z
2024-03-28T00:06:19Z
2024-03-28T00:14:19Z
Backport PR #57758 on branch 2.2.x (BUG: DataFrame Interchange Protocol errors on Boolean columns)
diff --git a/doc/source/whatsnew/v2.2.2.rst b/doc/source/whatsnew/v2.2.2.rst index 54084abab7817..2a48403d9a318 100644 --- a/doc/source/whatsnew/v2.2.2.rst +++ b/doc/source/whatsnew/v2.2.2.rst @@ -22,6 +22,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ +- :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the column's type was nullable boolean (:issue:`55332`) - :meth:`DataFrame.__dataframe__` was showing bytemask instead of bitmask for ``'string[pyarrow]'`` validity buffer (:issue:`57762`) - :meth:`DataFrame.__dataframe__` was showing non-null validity buffer (instead of ``None``) ``'string[pyarrow]'`` without missing values (:issue:`57761`) diff --git a/pandas/core/interchange/utils.py b/pandas/core/interchange/utils.py index 2a19dd5046aa3..fd1c7c9639242 100644 --- a/pandas/core/interchange/utils.py +++ b/pandas/core/interchange/utils.py @@ -144,6 +144,9 @@ def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str: elif isinstance(dtype, DatetimeTZDtype): return ArrowCTypes.TIMESTAMP.format(resolution=dtype.unit[0], tz=dtype.tz) + elif isinstance(dtype, pd.BooleanDtype): + return ArrowCTypes.BOOL + raise NotImplementedError( f"Conversion of {dtype} to Arrow C format string is not implemented." 
) diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py index 1ccada9116d4c..25418b8bb2b37 100644 --- a/pandas/tests/interchange/test_impl.py +++ b/pandas/tests/interchange/test_impl.py @@ -470,6 +470,7 @@ def test_non_str_names_w_duplicates(): ), ([1.0, 2.25, None], "Float32", "float32"), ([1.0, 2.25, None], "Float32[pyarrow]", "float32"), + ([True, False, None], "boolean", "bool"), ([True, False, None], "boolean[pyarrow]", "bool"), (["much ado", "about", None], "string[pyarrow_numpy]", "large_string"), (["much ado", "about", None], "string[pyarrow]", "large_string"), @@ -532,6 +533,7 @@ def test_pandas_nullable_with_missing_values( ), ([1.0, 2.25, 5.0], "Float32", "float32"), ([1.0, 2.25, 5.0], "Float32[pyarrow]", "float32"), + ([True, False, False], "boolean", "bool"), ([True, False, False], "boolean[pyarrow]", "bool"), (["much ado", "about", "nothing"], "string[pyarrow_numpy]", "large_string"), (["much ado", "about", "nothing"], "string[pyarrow]", "large_string"),
Backport PR #57758: BUG: DataFrame Interchange Protocol errors on Boolean columns
https://api.github.com/repos/pandas-dev/pandas/pulls/58036
2024-03-27T17:48:23Z
2024-03-27T19:02:51Z
2024-03-27T19:02:51Z
2024-03-27T19:02:52Z
DOC: pd to_datetime changed exact parameter to match docs
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index c416db4083f9a..5db222eb0ea7c 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -665,7 +665,7 @@ def to_datetime( yearfirst: bool = False, utc: bool = False, format: str | None = None, - exact: bool | lib.NoDefault = lib.no_default, + exact: bool = True, unit: str | None = None, origin: str = "unix", cache: bool = True,
- [ ] closes #55239 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58035
2024-03-27T17:43:31Z
2024-03-27T23:22:58Z
null
2024-03-27T23:23:03Z
Backport PR #57548 on branch 2.2.x (Fix accidental loss-of-precision for to_datetime(str, unit=...))
diff --git a/doc/source/whatsnew/v2.2.2.rst b/doc/source/whatsnew/v2.2.2.rst index 54084abab7817..19539918b8c8f 100644 --- a/doc/source/whatsnew/v2.2.2.rst +++ b/doc/source/whatsnew/v2.2.2.rst @@ -15,7 +15,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the a column's type was a pandas nullable on with missing values (:issue:`56702`) - :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the a column's type was a pyarrow nullable on with missing values (:issue:`57664`) -- +- Fixed regression in precision of :func:`to_datetime` with string and ``unit`` input (:issue:`57051`) .. --------------------------------------------------------------------------- .. _whatsnew_222.bug_fixes: diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 017fdc4bc834f..dd23c2f27ca09 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -277,7 +277,7 @@ def array_with_unit_to_datetime( bint is_raise = errors == "raise" ndarray[int64_t] iresult tzinfo tz = None - float fval + double fval assert is_ignore or is_coerce or is_raise diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 6791ac0340640..a1ed996dade8e 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -1912,6 +1912,14 @@ def test_unit(self, cache): with pytest.raises(ValueError, match=msg): to_datetime([1], unit="D", format="%Y%m%d", cache=cache) + def test_unit_str(self, cache): + # GH 57051 + # Test that strs aren't dropping precision to 32-bit accidentally. 
+ with tm.assert_produces_warning(FutureWarning): + res = to_datetime(["1704660000"], unit="s", origin="unix") + expected = to_datetime([1704660000], unit="s", origin="unix") + tm.assert_index_equal(res, expected) + def test_unit_array_mixed_nans(self, cache): values = [11111111111111111, 1, 1.0, iNaT, NaT, np.nan, "NaT", ""] result = to_datetime(values, unit="D", errors="ignore", cache=cache)
Backport PR #57548: Fix accidental loss-of-precision for to_datetime(str, unit=...)
https://api.github.com/repos/pandas-dev/pandas/pulls/58034
2024-03-27T16:52:23Z
2024-03-27T17:51:23Z
2024-03-27T17:51:23Z
2024-03-27T17:51:23Z
DOC: DataFrame.reset_index names param can't be a tuple as docs state
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b218dd899c8f8..50a93994dc76b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6011,8 +6011,8 @@ def reset_index( names : int, str or 1-dimensional list, default None Using the given string, rename the DataFrame column which contains the - index data. If the DataFrame has a MultiIndex, this has to be a list or - tuple with length equal to the number of levels. + index data. If the DataFrame has a MultiIndex, this has to be a list + with length equal to the number of levels. .. versionadded:: 1.5.0
- [ ] closes #57994 - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/58032
2024-03-27T16:12:00Z
2024-03-29T18:34:34Z
2024-03-29T18:34:34Z
2024-03-29T18:34:42Z
BUG: Fixed DataFrameGroupBy.transform with numba returning the wrong order with non increasing indexes #57069
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index a398b93b60018..2f23a240bdcd1 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -303,6 +303,7 @@ Bug fixes - Fixed bug in :class:`SparseDtype` for equal comparison with na fill value. (:issue:`54770`) - Fixed bug in :meth:`DataFrame.join` inconsistently setting result index name (:issue:`55815`) - Fixed bug in :meth:`DataFrame.to_string` that raised ``StopIteration`` with nested DataFrames. (:issue:`16098`) +- Fixed bug in :meth:`DataFrame.transform` that was returning the wrong order unless the index was monotonically increasing. (:issue:`57069`) - Fixed bug in :meth:`DataFrame.update` bool dtype being converted to object (:issue:`55509`) - Fixed bug in :meth:`DataFrameGroupBy.apply` that was returning a completely empty DataFrame when all return values of ``func`` were ``None`` instead of returning an empty DataFrame with the original columns and dtypes. (:issue:`57775`) - Fixed bug in :meth:`Series.diff` allowing non-integer values for the ``periods`` argument. (:issue:`56607`) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 0b61938d474b9..bd8e222831d0c 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1439,6 +1439,7 @@ def _transform_with_numba(self, func, *args, engine_kwargs=None, **kwargs): data and indices into a Numba jitted function. 
""" data = self._obj_with_exclusions + index_sorting = self._grouper.result_ilocs df = data if data.ndim == 2 else data.to_frame() starts, ends, sorted_index, sorted_data = self._numba_prep(df) @@ -1456,7 +1457,7 @@ def _transform_with_numba(self, func, *args, engine_kwargs=None, **kwargs): ) # result values needs to be resorted to their original positions since we # evaluated the data sorted by group - result = result.take(np.argsort(sorted_index), axis=0) + result = result.take(np.argsort(index_sorting), axis=0) index = data.index if data.ndim == 1: result_kwargs = {"name": data.name} diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py index b75113d3f4e14..a17d25b2e7e2e 100644 --- a/pandas/tests/groupby/transform/test_numba.py +++ b/pandas/tests/groupby/transform/test_numba.py @@ -181,10 +181,25 @@ def f(values, index): df = DataFrame({"group": ["A", "A", "B"], "v": [4, 5, 6]}, index=[-1, -2, -3]) result = df.groupby("group").transform(f, engine="numba") - expected = DataFrame([-4.0, -3.0, -2.0], columns=["v"], index=[-1, -2, -3]) + expected = DataFrame([-2.0, -3.0, -4.0], columns=["v"], index=[-1, -2, -3]) tm.assert_frame_equal(result, expected) +def test_index_order_consistency_preserved(): + # GH 57069 + pytest.importorskip("numba") + + def f(values, index): + return values + + df = DataFrame( + {"vals": [0.0, 1.0, 2.0, 3.0], "group": [0, 1, 0, 1]}, index=range(3, -1, -1) + ) + result = df.groupby("group")["vals"].transform(f, engine="numba") + expected = Series([0.0, 1.0, 2.0, 3.0], index=range(3, -1, -1), name="vals") + tm.assert_series_equal(result, expected) + + def test_engine_kwargs_not_cached(): # If the user passes a different set of engine_kwargs don't return the same # jitted function
- [X] closes #57069 - [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. DataFrameGroupBy.transform with numba was returning the wrong order unless the index was monotonically increasing due to the transformed results not being correctly reordered. Fixed the test "pandas/tests/groupby/transform/test_numba.py::test_index_data_correctly_passed" to expect the correct order in the result. Added a test "pandas/tests/groupby/transform/test_numba.py::test_index_order_consistency_preserved" to test DataFrameGroupBy.transform with engine='numba' with a decreasing index.
https://api.github.com/repos/pandas-dev/pandas/pulls/58030
2024-03-27T13:50:55Z
2024-03-28T22:42:30Z
2024-03-28T22:42:30Z
2024-03-28T22:42:39Z
CLN: enforce `any/all` deprecation with `datetime64`
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 549d49aaa1853..011f72868ab5d 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -212,6 +212,7 @@ Removal of prior version deprecations/changes - All arguments in :meth:`Series.to_dict` are now keyword only (:issue:`56493`) - Changed the default value of ``observed`` in :meth:`DataFrame.groupby` and :meth:`Series.groupby` to ``True`` (:issue:`51811`) - Enforce deprecation in :func:`testing.assert_series_equal` and :func:`testing.assert_frame_equal` with object dtype and mismatched null-like values, which are now considered not-equal (:issue:`18463`) +- Enforced deprecation ``all`` and ``any`` reductions with ``datetime64`` and :class:`DatetimeTZDtype` dtypes (:issue:`58029`) - Enforced deprecation disallowing parsing datetimes with mixed time zones unless user passes ``utc=True`` to :func:`to_datetime` (:issue:`57275`) - Enforced deprecation in :meth:`Series.value_counts` and :meth:`Index.value_counts` with object dtype performing dtype inference on the ``.index`` of the result (:issue:`56161`) - Enforced deprecation of :meth:`.DataFrameGroupBy.get_group` and :meth:`.SeriesGroupBy.get_group` allowing the ``name`` argument to be a non-tuple when grouping by a list of length 1 (:issue:`54155`) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 3dc2d77bb5a19..52cb175ca79a2 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1661,16 +1661,8 @@ def _groupby_op( dtype = self.dtype if dtype.kind == "M": # Adding/multiplying datetimes is not valid - if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]: - raise TypeError(f"datetime64 type does not support {how} operations") - if how in ["any", "all"]: - # GH#34479 - warnings.warn( - f"'{how}' with datetime64 dtypes is deprecated and will raise in a " - f"future version. 
Use (obj != pd.Timestamp(0)).{how}() instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) + if how in ["any", "all", "sum", "prod", "cumsum", "cumprod", "var", "skew"]: + raise TypeError(f"datetime64 type does not support operation: '{how}'") elif isinstance(dtype, PeriodDtype): # Adding/multiplying Periods is not valid @@ -2217,11 +2209,11 @@ def ceil( # Reductions def any(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool: - # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype + # GH#34479 the nanops call will raise a TypeError for non-td64 dtype return nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna()) def all(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool: - # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype + # GH#34479 the nanops call will raise a TypeError for non-td64 dtype return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna()) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index b68337d9e0de9..a124e8679ae8e 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -31,7 +31,6 @@ npt, ) from pandas.compat._optional import import_optional_dependency -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_complex, @@ -521,12 +520,7 @@ def nanany( if values.dtype.kind == "M": # GH#34479 - warnings.warn( - "'any' with datetime64 dtypes is deprecated and will raise in a " - "future version. Use (obj != pd.Timestamp(0)).any() instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) + raise TypeError("datetime64 type does not support operation: 'any'") values, _ = _get_values(values, skipna, fill_value=False, mask=mask) @@ -582,12 +576,7 @@ def nanall( if values.dtype.kind == "M": # GH#34479 - warnings.warn( - "'all' with datetime64 dtypes is deprecated and will raise in a " - "future version. 
Use (obj != pd.Timestamp(0)).all() instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) + raise TypeError("datetime64 type does not support operation: 'all'") values, _ = _get_values(values, skipna, fill_value=True, mask=mask) diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py index 414683b02dcba..dcbbac44d083a 100644 --- a/pandas/tests/extension/base/groupby.py +++ b/pandas/tests/extension/base/groupby.py @@ -162,8 +162,10 @@ def test_in_numeric_groupby(self, data_for_grouping): msg = "|".join( [ - # period/datetime + # period "does not support sum operations", + # datetime + "does not support operation: 'sum'", # all others re.escape(f"agg function failed [how->sum,dtype->{dtype}"), ] diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index 06e85f5c92913..5de4865feb6f9 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -104,10 +104,8 @@ def _supports_reduction(self, obj, op_name: str) -> bool: @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna): meth = all_boolean_reductions - msg = f"'{meth}' with datetime64 dtypes is deprecated and will raise in" - with tm.assert_produces_warning( - FutureWarning, match=msg, check_stacklevel=False - ): + msg = f"datetime64 type does not support operation: '{meth}'" + with pytest.raises(TypeError, match=msg): super().test_reduce_series_boolean(data, all_boolean_reductions, skipna) def test_series_constructor(self, data): diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 408cb0ab6fc5c..7aa3de7afe579 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1371,10 +1371,6 @@ def test_any_all_object_dtype( expected = Series([True, True, val, True]) tm.assert_series_equal(result, expected) - # GH#50947 deprecates this but it is 
not emitting a warning in some builds. - @pytest.mark.filterwarnings( - "ignore:'any' with datetime64 dtypes is deprecated.*:FutureWarning" - ) def test_any_datetime(self): # GH 23070 float_data = [1, np.nan, 3, np.nan] @@ -1386,10 +1382,9 @@ def test_any_datetime(self): ] df = DataFrame({"A": float_data, "B": datetime_data}) - result = df.any(axis=1) - - expected = Series([True, True, True, False]) - tm.assert_series_equal(result, expected) + msg = "datetime64 type does not support operation: 'any'" + with pytest.raises(TypeError, match=msg): + df.any(axis=1) def test_any_all_bool_only(self): # GH 25101 @@ -1481,23 +1476,23 @@ def test_any_all_np_func(self, func, data, expected): TypeError, match="dtype category does not support reduction" ): getattr(DataFrame(data), func.__name__)(axis=None) - else: - msg = "'(any|all)' with datetime64 dtypes is deprecated" - if data.dtypes.apply(lambda x: x.kind == "M").any(): - warn = FutureWarning - else: - warn = None + if data.dtypes.apply(lambda x: x.kind == "M").any(): + # GH#34479 + msg = "datetime64 type does not support operation: '(any|all)'" + with pytest.raises(TypeError, match=msg): + func(data) + + # method version + with pytest.raises(TypeError, match=msg): + getattr(DataFrame(data), func.__name__)(axis=None) - with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): - # GH#34479 - result = func(data) + elif data.dtypes.apply(lambda x: x != "category").any(): + result = func(data) assert isinstance(result, np.bool_) assert result.item() is expected # method version - with tm.assert_produces_warning(warn, match=msg): - # GH#34479 - result = getattr(DataFrame(data), func.__name__)(axis=None) + result = getattr(DataFrame(data), func.__name__)(axis=None) assert isinstance(result, np.bool_) assert result.item() is expected diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 7ec1598abf403..bcad88bdecabb 100644 --- a/pandas/tests/groupby/test_groupby.py +++ 
b/pandas/tests/groupby/test_groupby.py @@ -674,7 +674,7 @@ def test_raises_on_nuisance(df): df = df.loc[:, ["A", "C", "D"]] df["E"] = datetime.now() grouped = df.groupby("A") - msg = "datetime64 type does not support sum operations" + msg = "datetime64 type does not support operation: 'sum'" with pytest.raises(TypeError, match=msg): grouped.agg("sum") with pytest.raises(TypeError, match=msg): diff --git a/pandas/tests/groupby/test_raises.py b/pandas/tests/groupby/test_raises.py index f9d5de72eda1d..7af27d7227035 100644 --- a/pandas/tests/groupby/test_raises.py +++ b/pandas/tests/groupby/test_raises.py @@ -241,16 +241,16 @@ def test_groupby_raises_datetime( return klass, msg = { - "all": (None, ""), - "any": (None, ""), + "all": (TypeError, "datetime64 type does not support operation: 'all'"), + "any": (TypeError, "datetime64 type does not support operation: 'any'"), "bfill": (None, ""), "corrwith": (TypeError, "cannot perform __mul__ with this index type"), "count": (None, ""), "cumcount": (None, ""), "cummax": (None, ""), "cummin": (None, ""), - "cumprod": (TypeError, "datetime64 type does not support cumprod operations"), - "cumsum": (TypeError, "datetime64 type does not support cumsum operations"), + "cumprod": (TypeError, "datetime64 type does not support operation: 'cumprod'"), + "cumsum": (TypeError, "datetime64 type does not support operation: 'cumsum'"), "diff": (None, ""), "ffill": (None, ""), "fillna": (None, ""), @@ -265,7 +265,7 @@ def test_groupby_raises_datetime( "ngroup": (None, ""), "nunique": (None, ""), "pct_change": (TypeError, "cannot perform __truediv__ with this index type"), - "prod": (TypeError, "datetime64 type does not support prod"), + "prod": (TypeError, "datetime64 type does not support operation: 'prod'"), "quantile": (None, ""), "rank": (None, ""), "sem": (None, ""), @@ -276,18 +276,16 @@ def test_groupby_raises_datetime( "|".join( [ r"dtype datetime64\[ns\] does not support reduction", - "datetime64 type does not support skew 
operations", + "datetime64 type does not support operation: 'skew'", ] ), ), "std": (None, ""), - "sum": (TypeError, "datetime64 type does not support sum operations"), - "var": (TypeError, "datetime64 type does not support var operations"), + "sum": (TypeError, "datetime64 type does not support operation: 'sum"), + "var": (TypeError, "datetime64 type does not support operation: 'var'"), }[groupby_func] - if groupby_func in ["any", "all"]: - warn_msg = f"'{groupby_func}' with datetime64 dtypes is deprecated" - elif groupby_func == "fillna": + if groupby_func == "fillna": kind = "Series" if groupby_series else "DataFrame" warn_msg = f"{kind}GroupBy.fillna is deprecated" else: diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index 46f6367fbb3ed..117114c4c2cab 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -749,6 +749,7 @@ def test_cython_transform_frame_column( msg = "|".join( [ "does not support .* operations", + "does not support operation", ".* is not supported for object dtype", "is not implemented for this dtype", ] diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index b10319f5380e7..048553330c1ce 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -1009,32 +1009,41 @@ def test_any_all_datetimelike(self): ser = Series(dta) df = DataFrame(ser) - msg = "'(any|all)' with datetime64 dtypes is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - # GH#34479 - assert dta.all() - assert dta.any() + # GH#34479 + msg = "datetime64 type does not support operation: '(any|all)'" + with pytest.raises(TypeError, match=msg): + dta.all() + with pytest.raises(TypeError, match=msg): + dta.any() - assert ser.all() - assert ser.any() + with pytest.raises(TypeError, match=msg): + ser.all() + with pytest.raises(TypeError, 
match=msg): + ser.any() - assert df.any().all() - assert df.all().all() + with pytest.raises(TypeError, match=msg): + df.any().all() + with pytest.raises(TypeError, match=msg): + df.all().all() dta = dta.tz_localize("UTC") ser = Series(dta) df = DataFrame(ser) + # GH#34479 + with pytest.raises(TypeError, match=msg): + dta.all() + with pytest.raises(TypeError, match=msg): + dta.any() - with tm.assert_produces_warning(FutureWarning, match=msg): - # GH#34479 - assert dta.all() - assert dta.any() - - assert ser.all() - assert ser.any() + with pytest.raises(TypeError, match=msg): + ser.all() + with pytest.raises(TypeError, match=msg): + ser.any() - assert df.any().all() - assert df.all().all() + with pytest.raises(TypeError, match=msg): + df.any().all() + with pytest.raises(TypeError, match=msg): + df.all().all() tda = dta - dta[0] ser = Series(tda) diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py index f3b9c909290a8..9b442fa7dbd07 100644 --- a/pandas/tests/resample/test_resample_api.py +++ b/pandas/tests/resample/test_resample_api.py @@ -708,7 +708,9 @@ def test_selection_api_validation(): tm.assert_frame_equal(exp, result) exp.index.name = "d" - with pytest.raises(TypeError, match="datetime64 type does not support sum"): + with pytest.raises( + TypeError, match="datetime64 type does not support operation: 'sum'" + ): df.resample("2D", level="d").sum() result = df.resample("2D", level="d").sum(numeric_only=True) tm.assert_frame_equal(exp, result)
xref #50947, xref #58006 enforced deprecation of `any/all` with `datetime64`
https://api.github.com/repos/pandas-dev/pandas/pulls/58029
2024-03-27T12:41:40Z
2024-03-28T17:54:36Z
2024-03-28T17:54:36Z
2024-03-28T17:54:44Z
Fix DataFrame.cumsum failing when dtype is timedelta64[ns]
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 549d49aaa1853..e3fc3a24cfe00 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -318,6 +318,7 @@ Bug fixes ~~~~~~~~~ - Fixed bug in :class:`SparseDtype` for equal comparison with na fill value. (:issue:`54770`) - Fixed bug in :meth:`.DataFrameGroupBy.median` where nat values gave an incorrect result. (:issue:`57926`) +- Fixed bug in :meth:`DataFrame.cumsum` which was raising ``IndexError`` if dtype is ``timedelta64[ns]`` (:issue:`57956`) - Fixed bug in :meth:`DataFrame.join` inconsistently setting result index name (:issue:`55815`) - Fixed bug in :meth:`DataFrame.to_string` that raised ``StopIteration`` with nested DataFrames. (:issue:`16098`) - Fixed bug in :meth:`DataFrame.update` bool dtype being converted to object (:issue:`55509`) diff --git a/pandas/core/array_algos/datetimelike_accumulations.py b/pandas/core/array_algos/datetimelike_accumulations.py index 55942f2c9350d..c3a7c2e4fefb2 100644 --- a/pandas/core/array_algos/datetimelike_accumulations.py +++ b/pandas/core/array_algos/datetimelike_accumulations.py @@ -49,7 +49,8 @@ def _cum_func( if not skipna: mask = np.maximum.accumulate(mask) - result = func(y) + # GH 57956 + result = func(y, axis=0) result[mask] = iNaT if values.dtype.kind in "mM": diff --git a/pandas/tests/series/test_cumulative.py b/pandas/tests/series/test_cumulative.py index 68d7fd8b90df2..9b7b08127a550 100644 --- a/pandas/tests/series/test_cumulative.py +++ b/pandas/tests/series/test_cumulative.py @@ -91,6 +91,25 @@ def test_cummin_cummax_datetimelike(self, ts, method, skipna, exp_tdi): result = getattr(ser, method)(skipna=skipna) tm.assert_series_equal(expected, result) + def test_cumsum_datetimelike(self): + # GH#57956 + df = pd.DataFrame( + [ + [pd.Timedelta(0), pd.Timedelta(days=1)], + [pd.Timedelta(days=2), pd.NaT], + [pd.Timedelta(hours=-6), pd.Timedelta(hours=12)], + ] + ) + result = df.cumsum() + expected = 
pd.DataFrame( + [ + [pd.Timedelta(0), pd.Timedelta(days=1)], + [pd.Timedelta(days=2), pd.NaT], + [pd.Timedelta(days=1, hours=18), pd.Timedelta(days=1, hours=12)], + ] + ) + tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize( "func, exp", [
- [x] closes #57956 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58028
2024-03-27T10:31:59Z
2024-03-28T00:13:58Z
2024-03-28T00:13:58Z
2024-03-28T00:14:05Z
REGR: Performance of DataFrame.stack where columns are not a MultiIndex
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index afb0c489c9c94..0a2f7fe43b4b3 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -932,14 +932,18 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame: if len(frame.columns) == 1: data = frame.copy() else: - # Take the data from frame corresponding to this idx value - if len(level) == 1: - idx = (idx,) - gen = iter(idx) - column_indexer = tuple( - next(gen) if k in set_levels else slice(None) - for k in range(frame.columns.nlevels) - ) + if not isinstance(frame.columns, MultiIndex) and not isinstance(idx, tuple): + # GH#57750 - if the frame is an Index with tuples, .loc below will fail + column_indexer = idx + else: + # Take the data from frame corresponding to this idx value + if len(level) == 1: + idx = (idx,) + gen = iter(idx) + column_indexer = tuple( + next(gen) if k in set_levels else slice(None) + for k in range(frame.columns.nlevels) + ) data = frame.loc[:, column_indexer] if len(level) < frame.columns.nlevels:
- [x] closes #57302 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Thanks @DeaMariaLeon for identifying this regression and @jorisvandenbossche for the solution used here. ASVs ``` | Change | Before [e51039af] <enh_fillna_allow_none~1> | After [664c54b8] <regr_perf_stack> | Ratio | Benchmark (Parameter) | |----------|-----------------------------------------------|--------------------------------------|---------|------------------------------------------------------------------------| | - | 304±2ms | 44.2±0.4ms | 0.15 | reshape.ReshapeExtensionDtype.time_stack('datetime64[ns, US/Pacific]') | | - | 304±2ms | 43.7±0.2ms | 0.14 | reshape.ReshapeExtensionDtype.time_stack('Period[s]') | | - | 294±0.7ms | 40.8±0.3ms | 0.14 | reshape.ReshapeMaskedArrayDtype.time_stack('Float64') | | - | 291±2ms | 40.7±0.2ms | 0.14 | reshape.ReshapeMaskedArrayDtype.time_stack('Int64') | ```
https://api.github.com/repos/pandas-dev/pandas/pulls/58027
2024-03-27T03:43:05Z
2024-03-27T17:29:16Z
2024-03-27T17:29:16Z
2024-03-27T17:41:30Z
CLN: remove no-longer-needed warning filters
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ebcb700e656f6..a7545fb8d98de 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9660,13 +9660,7 @@ def _where( # make sure we are boolean fill_value = bool(inplace) - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - "Downcasting object dtype arrays", - category=FutureWarning, - ) - cond = cond.fillna(fill_value) + cond = cond.fillna(fill_value) cond = cond.infer_objects() msg = "Boolean array expected for the condition, not {dtype}" diff --git a/pandas/io/formats/xml.py b/pandas/io/formats/xml.py index 702430642a597..47f162e93216d 100644 --- a/pandas/io/formats/xml.py +++ b/pandas/io/formats/xml.py @@ -11,7 +11,6 @@ Any, final, ) -import warnings from pandas.errors import AbstractMethodError from pandas.util._decorators import ( @@ -208,13 +207,7 @@ def _process_dataframe(self) -> dict[int | str, dict[str, Any]]: df = df.reset_index() if self.na_rep is not None: - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - "Downcasting object dtype arrays", - category=FutureWarning, - ) - df = df.fillna(self.na_rep) + df = df.fillna(self.na_rep) return df.to_dict(orient="index") diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 8f4028c1ead3a..13d74e935f786 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -16,7 +16,6 @@ final, overload, ) -import warnings import numpy as np @@ -1173,13 +1172,7 @@ def _try_convert_data( if all(notna(data)): return data, False - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - "Downcasting object dtype arrays", - category=FutureWarning, - ) - filled = data.fillna(np.nan) + filled = data.fillna(np.nan) return filled, True diff --git a/pandas/io/stata.py b/pandas/io/stata.py index fe8b4896d097e..3ec077806d6c4 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -2887,13 +2887,7 @@ def _prepare_data(self) -> np.rec.recarray: for i, col in 
enumerate(data): typ = typlist[i] if typ <= self._max_string_length: - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - "Downcasting object dtype arrays", - category=FutureWarning, - ) - dc = data[col].fillna("") + dc = data[col].fillna("") data[col] = dc.apply(_pad_bytes, args=(typ,)) stype = f"S{typ}" dtypes[col] = stype diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index dbd2743345a38..700136bca8da7 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1725,13 +1725,7 @@ def _kind(self) -> Literal["area"]: def __init__(self, data, **kwargs) -> None: kwargs.setdefault("stacked", True) - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - "Downcasting object dtype arrays", - category=FutureWarning, - ) - data = data.fillna(value=0) + data = data.fillna(value=0) LinePlot.__init__(self, data, **kwargs) if not self.stacked: diff --git a/pandas/tests/extension/test_masked.py b/pandas/tests/extension/test_masked.py index 5481e50de10bb..69ce42203d510 100644 --- a/pandas/tests/extension/test_masked.py +++ b/pandas/tests/extension/test_masked.py @@ -14,8 +14,6 @@ """ -import warnings - import numpy as np import pytest @@ -215,13 +213,7 @@ def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result): if sdtype.kind in "iu": if op_name in ("__rtruediv__", "__truediv__", "__div__"): - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - "Downcasting object dtype arrays", - category=FutureWarning, - ) - filled = expected.fillna(np.nan) + filled = expected.fillna(np.nan) expected = filled.astype("Float64") else: # combine method result in 'biggest' (int64) dtype diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index 8b3596debc0b8..aeffc4835a347 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -96,7 +96,6 @@ def 
test_where_upcasting(self): tm.assert_series_equal(result, expected) - @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") def test_where_alignment(self, where_frame, float_string_frame): # aligning def _check_align(df, cond, other, check_dtypes=True): @@ -171,7 +170,6 @@ def test_where_invalid(self): with pytest.raises(ValueError, match=msg): df.mask(0) - @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") def test_where_set(self, where_frame, float_string_frame, mixed_int_frame): # where inplace diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 408cb0ab6fc5c..5b9dd9e5b8aa6 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1272,7 +1272,6 @@ def test_any_all_bool_with_na( ): getattr(bool_frame_with_na, all_boolean_reductions)(axis=axis, bool_only=False) - @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") def test_any_all_bool_frame(self, all_boolean_reductions, bool_frame_with_na): # GH#12863: numpy gives back non-boolean data for object type # so fill NaNs to compare with pandas behavior diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index 0b6b38340de9e..09235f154b188 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -1223,7 +1223,6 @@ def test_stack_preserve_categorical_dtype_values(self, future_stack): @pytest.mark.filterwarnings( "ignore:The previous implementation of stack is deprecated" ) - @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") @pytest.mark.parametrize( "index", [ diff --git a/pandas/tests/groupby/test_numeric_only.py b/pandas/tests/groupby/test_numeric_only.py index 55a79863f206b..33cdd1883e1b9 100644 --- a/pandas/tests/groupby/test_numeric_only.py +++ b/pandas/tests/groupby/test_numeric_only.py @@ -310,7 +310,6 @@ def 
test_numeric_only(kernel, has_arg, numeric_only, keys): method(*args, **kwargs) -@pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") @pytest.mark.parametrize("dtype", [bool, int, float, object]) def test_deprecate_numeric_only_series(dtype, groupby_func, request): # GH#46560 diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 0ace43f608b5a..7b45a267a4572 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -195,7 +195,6 @@ def test_series_datetimelike_attribute_access_invalid(self): with pytest.raises(AttributeError, match=msg): ser.weekday - @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") @pytest.mark.parametrize( "kernel, has_numeric_only", [ diff --git a/pandas/tests/series/test_logical_ops.py b/pandas/tests/series/test_logical_ops.py index 757f63dd86904..b76b69289b72f 100644 --- a/pandas/tests/series/test_logical_ops.py +++ b/pandas/tests/series/test_logical_ops.py @@ -15,7 +15,6 @@ class TestSeriesLogicalOps: - @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning") @pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor]) def test_bool_operators_with_nas(self, bool_op): # boolean &, |, ^ should work with object arrays and propagate NAs
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58025
2024-03-27T03:12:58Z
2024-03-27T17:34:03Z
2024-03-27T17:34:03Z
2024-03-27T17:41:09Z
DEPR: mismatched null-likes in tm.assert_foo_equal
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 4b7b075ceafaf..549d49aaa1853 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -211,6 +211,7 @@ Removal of prior version deprecations/changes - All arguments in :meth:`Index.sort_values` are now keyword only (:issue:`56493`) - All arguments in :meth:`Series.to_dict` are now keyword only (:issue:`56493`) - Changed the default value of ``observed`` in :meth:`DataFrame.groupby` and :meth:`Series.groupby` to ``True`` (:issue:`51811`) +- Enforce deprecation in :func:`testing.assert_series_equal` and :func:`testing.assert_frame_equal` with object dtype and mismatched null-like values, which are now considered not-equal (:issue:`18463`) - Enforced deprecation disallowing parsing datetimes with mixed time zones unless user passes ``utc=True`` to :func:`to_datetime` (:issue:`57275`) - Enforced deprecation in :meth:`Series.value_counts` and :meth:`Index.value_counts` with object dtype performing dtype inference on the ``.index`` of the result (:issue:`56161`) - Enforced deprecation of :meth:`.DataFrameGroupBy.get_group` and :meth:`.SeriesGroupBy.get_group` allowing the ``name`` argument to be a non-tuple when grouping by a list of length 1 (:issue:`54155`) diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx index aed0f4b082d4e..cfd31fa610e69 100644 --- a/pandas/_libs/testing.pyx +++ b/pandas/_libs/testing.pyx @@ -1,6 +1,5 @@ import cmath import math -import warnings import numpy as np @@ -18,7 +17,6 @@ from pandas._libs.util cimport ( is_real_number_object, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.missing import array_equivalent @@ -188,15 +186,7 @@ cpdef assert_almost_equal(a, b, return True elif checknull(b): # GH#18463 - warnings.warn( - f"Mismatched null-like values {a} and {b} found. In a future " - "version, pandas equality-testing functions " - "(e.g. 
assert_frame_equal) will consider these not-matching " - "and raise.", - FutureWarning, - stacklevel=find_stack_level(), - ) - return True + raise AssertionError(f"Mismatched null-like values {a} != {b}") raise AssertionError(f"{a} != {b}") elif checknull(b): raise AssertionError(f"{a} != {b}") diff --git a/pandas/tests/util/test_assert_almost_equal.py b/pandas/tests/util/test_assert_almost_equal.py index 1688e77ccd2d7..bcc2e4e03f367 100644 --- a/pandas/tests/util/test_assert_almost_equal.py +++ b/pandas/tests/util/test_assert_almost_equal.py @@ -311,7 +311,7 @@ def test_assert_almost_equal_inf(a, b): @pytest.mark.parametrize("left", objs) @pytest.mark.parametrize("right", objs) -def test_mismatched_na_assert_almost_equal_deprecation(left, right): +def test_mismatched_na_assert_almost_equal(left, right): left_arr = np.array([left], dtype=object) right_arr = np.array([right], dtype=object) @@ -331,7 +331,7 @@ def test_mismatched_na_assert_almost_equal_deprecation(left, right): ) else: - with tm.assert_produces_warning(FutureWarning, match=msg): + with pytest.raises(AssertionError, match=msg): _assert_almost_equal_both(left, right, check_dtype=False) # TODO: to get the same deprecation in assert_numpy_array_equal we need @@ -339,11 +339,11 @@ def test_mismatched_na_assert_almost_equal_deprecation(left, right): # TODO: to get the same deprecation in assert_index_equal we need to # change/deprecate array_equivalent_object to be stricter, as # assert_index_equal uses Index.equal which uses array_equivalent. - with tm.assert_produces_warning(FutureWarning, match=msg): + with pytest.raises(AssertionError, match="Series are different"): tm.assert_series_equal( Series(left_arr, dtype=object), Series(right_arr, dtype=object) ) - with tm.assert_produces_warning(FutureWarning, match=msg): + with pytest.raises(AssertionError, match="DataFrame.iloc.* are different"): tm.assert_frame_equal( DataFrame(left_arr, dtype=object), DataFrame(right_arr, dtype=object) )
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58023
2024-03-26T23:35:57Z
2024-03-27T00:39:15Z
2024-03-27T00:39:15Z
2024-03-27T02:57:11Z
DEPR: freq keyword in PeriodArray
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 4b7b075ceafaf..c9c5cdc6ec4df 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -206,6 +206,7 @@ Removal of prior version deprecations/changes - :meth:`SeriesGroupBy.agg` no longer pins the name of the group to the input passed to the provided ``func`` (:issue:`51703`) - All arguments except ``name`` in :meth:`Index.rename` are now keyword only (:issue:`56493`) - All arguments except the first ``path``-like argument in IO writers are now keyword only (:issue:`54229`) +- Removed "freq" keyword from :class:`PeriodArray` constructor, use "dtype" instead (:issue:`52462`) - Removed the "closed" and "normalize" keywords in :meth:`DatetimeIndex.__new__` (:issue:`52628`) - Removed the "closed" and "unit" keywords in :meth:`TimedeltaIndex.__new__` (:issue:`52628`, :issue:`55499`) - All arguments in :meth:`Index.sort_values` are now keyword only (:issue:`56493`) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index e73eba710ec39..8baf363b909fb 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -54,7 +54,6 @@ cache_readonly, doc, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, @@ -135,11 +134,6 @@ class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin): # type: ignore[misc] dtype : PeriodDtype, optional A PeriodDtype instance from which to extract a `freq`. If both `freq` and `dtype` are specified, then the frequencies must match. - freq : str or DateOffset - The `freq` to use for the array. Mostly applicable when `values` - is an ndarray of integers, when `freq` is required. When `values` - is a PeriodArray (or box around), it's checked that ``values.freq`` - matches `freq`. copy : bool, default False Whether to copy the ordinals before storing. 
@@ -224,20 +218,7 @@ def _scalar_type(self) -> type[Period]: # -------------------------------------------------------------------- # Constructors - def __init__( - self, values, dtype: Dtype | None = None, freq=None, copy: bool = False - ) -> None: - if freq is not None: - # GH#52462 - warnings.warn( - "The 'freq' keyword in the PeriodArray constructor is deprecated " - "and will be removed in a future version. Pass 'dtype' instead", - FutureWarning, - stacklevel=find_stack_level(), - ) - freq = validate_dtype_freq(dtype, freq) - dtype = PeriodDtype(freq) - + def __init__(self, values, dtype: Dtype | None = None, copy: bool = False) -> None: if dtype is not None: dtype = pandas_dtype(dtype) if not isinstance(dtype, PeriodDtype): diff --git a/pandas/tests/arrays/period/test_constructors.py b/pandas/tests/arrays/period/test_constructors.py index d034162f1b46e..63b0e456c4566 100644 --- a/pandas/tests/arrays/period/test_constructors.py +++ b/pandas/tests/arrays/period/test_constructors.py @@ -135,17 +135,6 @@ def test_from_td64nat_sequence_raises(): pd.DataFrame(arr, dtype=dtype) -def test_freq_deprecated(): - # GH#52462 - data = np.arange(5).astype(np.int64) - msg = "The 'freq' keyword in the PeriodArray constructor is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - res = PeriodArray(data, freq="M") - - expected = PeriodArray(data, dtype="period[M]") - tm.assert_equal(res, expected) - - def test_period_array_from_datetime64(): arr = np.array( ["2020-01-01T00:00:00", "2020-02-02T00:00:00"], dtype="datetime64[ns]"
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58022
2024-03-26T23:24:10Z
2024-03-27T17:35:02Z
2024-03-27T17:35:02Z
2024-03-27T17:40:55Z
BUG: #57972 kurtosis small sample size low variance false positive cutoff
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 4b7b075ceafaf..93759feef4b3e 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -324,6 +324,7 @@ Bug fixes - Fixed bug in :meth:`Series.diff` allowing non-integer values for the ``periods`` argument. (:issue:`56607`) - Fixed bug in :meth:`Series.rank` that doesn't preserve missing values for nullable integers when ``na_option='keep'``. (:issue:`56976`) - Fixed bug in :meth:`Series.replace` and :meth:`DataFrame.replace` inconsistently replacing matching instances when ``regex=True`` and missing values are present. (:issue:`56599`) +- Fixed bug in :meth:`Series.rolling.kurt` with small sized values arrays with low variance getting zeroed out even when numerically stable (:issue:`57972`) Categorical ^^^^^^^^^^^ diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index 6365c030b695b..27d2aae48579f 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -712,7 +712,8 @@ cdef float64_t calc_kurt(int64_t minp, int64_t nobs, # if the variance is less than 1e-14, it could be # treat as zero, here we follow the original # skew/kurt behaviour to check B <= 1e-14 - if B <= 1e-14: + # #57972: for small arrays the cutoff can be lowered + if B <= 1e-14 and nobs > 100 or B <= 1e-16: result = NaN else: K = (dnobs * dnobs - 1.) * D / (B * B) - 3 * ((dnobs - 1.) 
** 2) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index b68337d9e0de9..4a16b09439ee9 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1357,9 +1357,13 @@ def nankurt( # floating point error # # #18044 in _libs/windows.pyx calc_kurt follow this behavior - # to fix the fperr to treat denom <1e-14 as zero - numerator = _zero_out_fperr(numerator) - denominator = _zero_out_fperr(denominator) + # to fix the fperr to treat denom <1e-14 as zero (default cutoff) + # GH-57972 set cutoff lower for small arrays to prevent cutoff of otherwise + # numerically stable values + length = count[0] if isinstance(count, np.ndarray) else count + cutoff = 1e-14 if length > 100 else 1e-16 + numerator = _zero_out_fperr(numerator, cutoff) + denominator = _zero_out_fperr(denominator, cutoff) if not isinstance(denominator, np.ndarray): # if ``denom`` is a scalar, check these corner cases first before @@ -1576,12 +1580,12 @@ def check_below_min_count( return False -def _zero_out_fperr(arg): +def _zero_out_fperr(arg, cutoff=1e-14): # #18044 reference this behavior to fix rolling skew/kurt issue if isinstance(arg, np.ndarray): - return np.where(np.abs(arg) < 1e-14, 0, arg) + return np.where(np.abs(arg) < cutoff, 0, arg) else: - return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg + return arg.dtype.type(0) if np.abs(arg) < cutoff else arg @disallow("M8", "m8") diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index ce41f1e76de79..537de5832ed23 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -1105,6 +1105,18 @@ def test_nans_skipna(self, samples, actual_kurt): kurt = nanops.nankurt(samples, skipna=True) tm.assert_almost_equal(kurt, actual_kurt) + def test_small_arrays_with_low_variance(self): + # GH-57972 + # small sample arrays with low variance have a lower threshold for breakdown + # of numerical stability and should be handled accordingly + low_var_samples = np.array( + [-2.05191341e-05] + [0.0e00] * 4 + 
[-4.10391103e-05] + [0.0e00] * 23 + ) + # calculated with scipy.status kurtosis(low_var_samples, bias=False) + scipy_kurt = 18.087646853025614 + kurt = nanops.nankurt(low_var_samples) + tm.assert_almost_equal(kurt, scipy_kurt) + @property def prng(self): return np.random.default_rng(2)
Not much activity in the issue #57972, but this seems to be a minor problem with the kurtosis implementation. - [x] closes #57972 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58020
2024-03-26T23:10:30Z
2024-03-27T09:43:44Z
null
2024-03-27T09:44:03Z
CLN/PERF: Simplify argmin/argmax
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 7f4e6f6666382..930ee83aea00b 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -210,7 +210,7 @@ def argmin(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[overri # override base class by adding axis keyword validate_bool_kwarg(skipna, "skipna") if not skipna and self._hasna: - raise NotImplementedError + raise ValueError("Encountered an NA value with skipna=False") return nargminmax(self, "argmin", axis=axis) # Signature of "argmax" incompatible with supertype "ExtensionArray" @@ -218,7 +218,7 @@ def argmax(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[overri # override base class by adding axis keyword validate_bool_kwarg(skipna, "skipna") if not skipna and self._hasna: - raise NotImplementedError + raise ValueError("Encountered an NA value with skipna=False") return nargminmax(self, "argmax", axis=axis) def unique(self) -> Self: diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 76615704f2e33..fdc839225a557 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -885,7 +885,7 @@ def argmin(self, skipna: bool = True) -> int: # 2. argmin itself : total control over sorting. validate_bool_kwarg(skipna, "skipna") if not skipna and self._hasna: - raise NotImplementedError + raise ValueError("Encountered an NA value with skipna=False") return nargminmax(self, "argmin") def argmax(self, skipna: bool = True) -> int: @@ -919,7 +919,7 @@ def argmax(self, skipna: bool = True) -> int: # 2. argmax itself : total control over sorting. 
validate_bool_kwarg(skipna, "skipna") if not skipna and self._hasna: - raise NotImplementedError + raise ValueError("Encountered an NA value with skipna=False") return nargminmax(self, "argmax") def interpolate( diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index bdcb3219a9875..2a96423017bb7 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -1623,13 +1623,13 @@ def _argmin_argmax(self, kind: Literal["argmin", "argmax"]) -> int: def argmax(self, skipna: bool = True) -> int: validate_bool_kwarg(skipna, "skipna") if not skipna and self._hasna: - raise NotImplementedError + raise ValueError("Encountered an NA value with skipna=False") return self._argmin_argmax("argmax") def argmin(self, skipna: bool = True) -> int: validate_bool_kwarg(skipna, "skipna") if not skipna and self._hasna: - raise NotImplementedError + raise ValueError("Encountered an NA value with skipna=False") return self._argmin_argmax("argmin") # ------------------------------------------------------------------------ diff --git a/pandas/core/base.py b/pandas/core/base.py index 263265701691b..0dffc0254c550 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -735,13 +735,8 @@ def argmax( nv.validate_minmax_axis(axis) skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) - if skipna and len(delegate) > 0 and isna(delegate).all(): - raise ValueError("Encountered all NA values") - elif not skipna and isna(delegate).any(): - raise ValueError("Encountered an NA value with skipna=False") - if isinstance(delegate, ExtensionArray): - return delegate.argmax() + return delegate.argmax(skipna=skipna) else: result = nanops.nanargmax(delegate, skipna=skipna) # error: Incompatible return value type (got "Union[int, ndarray]", expected @@ -754,15 +749,10 @@ def argmin( ) -> int: delegate = self._values nv.validate_minmax_axis(axis) - skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) - - if skipna and 
len(delegate) > 0 and isna(delegate).all(): - raise ValueError("Encountered all NA values") - elif not skipna and isna(delegate).any(): - raise ValueError("Encountered an NA value with skipna=False") + skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) if isinstance(delegate, ExtensionArray): - return delegate.argmin() + return delegate.argmin(skipna=skipna) else: result = nanops.nanargmin(delegate, skipna=skipna) # error: Incompatible return value type (got "Union[int, ndarray]", expected diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 76dd19a9424f5..c57c7d1fe1232 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6975,11 +6975,11 @@ def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: - # Take advantage of cache - if self._isnan.all(): - raise ValueError("Encountered all NA values") - elif not skipna: + if not skipna: raise ValueError("Encountered an NA value with skipna=False") + elif self._isnan.all(): + raise ValueError("Encountered all NA values") + return super().argmin(skipna=skipna) @Appender(IndexOpsMixin.argmax.__doc__) @@ -6988,11 +6988,10 @@ def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: - # Take advantage of cache - if self._isnan.all(): - raise ValueError("Encountered all NA values") - elif not skipna: + if not skipna: raise ValueError("Encountered an NA value with skipna=False") + elif self._isnan.all(): + raise ValueError("Encountered all NA values") return super().argmax(skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index b68337d9e0de9..623d61a9b2ea9 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1439,20 +1439,15 @@ def _maybe_arg_null_out( return result if axis is None or not 
getattr(result, "ndim", False): - if skipna: - if mask.all(): - raise ValueError("Encountered all NA values") - else: - if mask.any(): - raise ValueError("Encountered an NA value with skipna=False") + if skipna and mask.all(): + raise ValueError("Encountered all NA values") + elif not skipna and mask.any(): + raise ValueError("Encountered an NA value with skipna=False") else: - na_mask = mask.all(axis) - if na_mask.any(): + if skipna and mask.all(axis).any(): raise ValueError("Encountered all NA values") - elif not skipna: - na_mask = mask.any(axis) - if na_mask.any(): - raise ValueError("Encountered an NA value with skipna=False") + elif not skipna and mask.any(axis).any(): + raise ValueError("Encountered an NA value with skipna=False") return result diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index 26638c6160b7b..225a3301b8b8c 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -191,10 +191,10 @@ def test_argmax_argmin_no_skipna_notimplemented(self, data_missing_for_sorting): # GH#38733 data = data_missing_for_sorting - with pytest.raises(NotImplementedError, match=""): + with pytest.raises(ValueError, match="Encountered an NA value"): data.argmin(skipna=False) - with pytest.raises(NotImplementedError, match=""): + with pytest.raises(ValueError, match="Encountered an NA value"): data.argmax(skipna=False) @pytest.mark.parametrize( diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 408cb0ab6fc5c..c5c7ffab9b4ae 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1066,7 +1066,7 @@ def test_idxmin(self, float_frame, int_frame, skipna, axis): frame.iloc[15:20, -2:] = np.nan for df in [frame, int_frame]: if (not skipna or axis == 1) and df is not int_frame: - if axis == 1: + if skipna: msg = "Encountered all NA values" else: msg = "Encountered an NA value" @@ -1116,7 +1116,7 @@ def 
test_idxmax(self, float_frame, int_frame, skipna, axis): frame.iloc[15:20, -2:] = np.nan for df in [frame, int_frame]: if (skipna is False or axis == 1) and df is frame: - if axis == 1: + if skipna: msg = "Encountered all NA values" else: msg = "Encountered an NA value" diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index b10319f5380e7..726ed4ad8a399 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -171,9 +171,9 @@ def test_argminmax(self): obj.argmin() with pytest.raises(ValueError, match="Encountered all NA values"): obj.argmax() - with pytest.raises(ValueError, match="Encountered all NA values"): + with pytest.raises(ValueError, match="Encountered an NA value"): obj.argmin(skipna=False) - with pytest.raises(ValueError, match="Encountered all NA values"): + with pytest.raises(ValueError, match="Encountered an NA value"): obj.argmax(skipna=False) obj = Index([NaT, datetime(2011, 11, 1), datetime(2011, 11, 2), NaT]) @@ -189,9 +189,9 @@ def test_argminmax(self): obj.argmin() with pytest.raises(ValueError, match="Encountered all NA values"): obj.argmax() - with pytest.raises(ValueError, match="Encountered all NA values"): + with pytest.raises(ValueError, match="Encountered an NA value"): obj.argmin(skipna=False) - with pytest.raises(ValueError, match="Encountered all NA values"): + with pytest.raises(ValueError, match="Encountered an NA value"): obj.argmax(skipna=False) @pytest.mark.parametrize("op, expected_col", [["max", "a"], ["min", "b"]]) @@ -856,7 +856,8 @@ def test_idxmin(self): # all NaNs allna = string_series * np.nan - with pytest.raises(ValueError, match="Encountered all NA values"): + msg = "Encountered all NA values" + with pytest.raises(ValueError, match=msg): allna.idxmin() # datetime64[ns] @@ -888,7 +889,8 @@ def test_idxmax(self): # all NaNs allna = string_series * np.nan - with pytest.raises(ValueError, match="Encountered all NA values"): 
+ msg = "Encountered all NA values" + with pytest.raises(ValueError, match=msg): allna.idxmax() s = Series(date_range("20130102", periods=6)) @@ -1146,12 +1148,12 @@ def test_idxminmax_object_dtype(self, using_infer_string): msg = "'>' not supported between instances of 'float' and 'str'" with pytest.raises(TypeError, match=msg): ser3.idxmax() - with pytest.raises(ValueError, match="Encountered an NA value"): + with pytest.raises(TypeError, match=msg): ser3.idxmax(skipna=False) msg = "'<' not supported between instances of 'float' and 'str'" with pytest.raises(TypeError, match=msg): ser3.idxmin() - with pytest.raises(ValueError, match="Encountered an NA value"): + with pytest.raises(TypeError, match=msg): ser3.idxmin(skipna=False) def test_idxminmax_object_frame(self):
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Ref: https://github.com/pandas-dev/pandas/pull/57971#discussion_r1539849666 ASVs against main: ``` | Change | Before [b63ae8c7] | After [46bbd5eb] <cln_argmin_argmax> | Ratio | Benchmark (Parameter) | |----------|----------------------|----------------------------------------|---------|---------------------------------------------------------------| | - | 184±1μs | 138±3μs | 0.75 | series_methods.NanOps.time_func('argmax', 1000000, 'int32') | | - | 14.4±0.1μs | 10.5±0.2μs | 0.73 | series_methods.NanOps.time_func('argmax', 1000, 'float64') | | - | 1.23±0.06ms | 863±20μs | 0.7 | series_methods.NanOps.time_func('argmax', 1000000, 'float64') | | - | 77.9±0.6μs | 40.8±0.7μs | 0.52 | series_methods.NanOps.time_func('argmax', 1000000, 'int8') | | - | 7.42±0.2μs | 2.44±0.06μs | 0.33 | series_methods.NanOps.time_func('argmax', 1000, 'int64') | | - | 7.18±0.2μs | 2.29±0.02μs | 0.32 | series_methods.NanOps.time_func('argmax', 1000, 'int32') | | - | 7.25±0.1μs | 2.29±0.03μs | 0.32 | series_methods.NanOps.time_func('argmax', 1000, 'int8') | ``` ASVs against 2.2.x show no perf change.
https://api.github.com/repos/pandas-dev/pandas/pulls/58019
2024-03-26T21:47:08Z
2024-04-01T18:13:11Z
2024-04-01T18:13:11Z
2024-04-02T02:19:26Z
PERF: Allow Index.to_frame to return RangeIndex columns
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 26dd6f83ad44a..be7c3277759f5 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -299,6 +299,7 @@ Performance improvements - Performance improvement in :meth:`DataFrameGroupBy.ffill`, :meth:`DataFrameGroupBy.bfill`, :meth:`SeriesGroupBy.ffill`, and :meth:`SeriesGroupBy.bfill` (:issue:`56902`) - Performance improvement in :meth:`Index.join` by propagating cached attributes in cases where the result matches one of the inputs (:issue:`57023`) - Performance improvement in :meth:`Index.take` when ``indices`` is a full range indexer from zero to length of index (:issue:`56806`) +- Performance improvement in :meth:`Index.to_frame` returning a :class:`RangeIndex` columns of a :class:`Index` when possible. (:issue:`58018`) - Performance improvement in :meth:`MultiIndex.equals` for equal length indexes (:issue:`56990`) - Performance improvement in :meth:`RangeIndex.__getitem__` with a boolean mask or integers returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57588`) - Performance improvement in :meth:`RangeIndex.append` when appending the same index (:issue:`57252`) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 76dd19a9424f5..e510d487ac954 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1374,16 +1374,19 @@ def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: return attrs @final - def _get_level_names(self) -> Hashable | Sequence[Hashable]: + def _get_level_names(self) -> range | Sequence[Hashable]: """ Return a name or list of names with None replaced by the level number. 
""" if self._is_multi: - return [ - level if name is None else name for level, name in enumerate(self.names) - ] + return maybe_sequence_to_range( + [ + level if name is None else name + for level, name in enumerate(self.names) + ] + ) else: - return 0 if self.name is None else self.name + return range(1) if self.name is None else [self.name] @final def _mpl_repr(self) -> np.ndarray: @@ -1630,8 +1633,11 @@ def to_frame( from pandas import DataFrame if name is lib.no_default: - name = self._get_level_names() - result = DataFrame({name: self}, copy=False) + result_name = self._get_level_names() + else: + result_name = Index([name]) # type: ignore[assignment] + result = DataFrame(self, copy=False) + result.columns = result_name if index: result.index = self diff --git a/pandas/tests/indexes/multi/test_conversion.py b/pandas/tests/indexes/multi/test_conversion.py index 3c2ca045d6f99..f6b10c989326f 100644 --- a/pandas/tests/indexes/multi/test_conversion.py +++ b/pandas/tests/indexes/multi/test_conversion.py @@ -5,6 +5,7 @@ from pandas import ( DataFrame, MultiIndex, + RangeIndex, ) import pandas._testing as tm @@ -148,6 +149,13 @@ def test_to_frame_duplicate_labels(): tm.assert_frame_equal(result, expected) +def test_to_frame_column_rangeindex(): + mi = MultiIndex.from_arrays([[1, 2], ["a", "b"]]) + result = mi.to_frame().columns + expected = RangeIndex(2) + tm.assert_index_equal(result, expected, exact=True) + + def test_to_flat_index(idx): expected = pd.Index( ( diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index eb0010066a7f6..a2dee61295c74 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -508,3 +508,17 @@ def test_compare_read_only_array(): idx = pd.Index(arr) result = idx > 69 assert result.dtype == bool + + +def test_to_frame_column_rangeindex(): + idx = pd.Index([1]) + result = idx.to_frame().columns + expected = RangeIndex(1) + tm.assert_index_equal(result, expected, 
exact=True) + + +def test_to_frame_name_tuple_multiindex(): + idx = pd.Index([1]) + result = idx.to_frame(name=(1, 2)) + expected = pd.DataFrame([1], columns=MultiIndex.from_arrays([[1], [2]]), index=idx) + tm.assert_frame_equal(result, expected)
Discovered in https://github.com/pandas-dev/pandas/pull/57441
https://api.github.com/repos/pandas-dev/pandas/pulls/58018
2024-03-26T19:29:43Z
2024-03-28T03:01:12Z
2024-03-28T03:01:12Z
2024-03-28T17:50:12Z
Docs: Add note about exception for integer slices with float indices
diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index 24cdbad41fe60..fd843ca68a60b 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -262,6 +262,10 @@ The most robust and consistent way of slicing ranges along arbitrary axes is described in the :ref:`Selection by Position <indexing.integer>` section detailing the ``.iloc`` method. For now, we explain the semantics of slicing using the ``[]`` operator. + .. note:: + + When the :class:`Series` has float indices, slicing will select by position. + With Series, the syntax works exactly as with an ndarray, returning a slice of the values and the corresponding labels:
- [x] closes #57277 - [N/A] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [N/A] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [N/A] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58017
2024-03-26T19:22:07Z
2024-04-02T00:37:53Z
2024-04-02T00:37:53Z
2024-04-02T19:25:43Z
PERF: Allow np.integer Series/Index to convert to RangeIndex
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 76dd19a9424f5..7c5b88258e6bb 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -7157,17 +7157,22 @@ def maybe_sequence_to_range(sequence) -> Any | range: ------- Any : input or range """ - if isinstance(sequence, (ABCSeries, Index, range, ExtensionArray)): + if isinstance(sequence, (range, ExtensionArray)): return sequence elif len(sequence) == 1 or lib.infer_dtype(sequence, skipna=False) != "integer": return sequence - elif len(sequence) == 0: + elif isinstance(sequence, (ABCSeries, Index)) and not ( + isinstance(sequence.dtype, np.dtype) and sequence.dtype.kind == "i" + ): + return sequence + if len(sequence) == 0: return range(0) - diff = sequence[1] - sequence[0] + np_sequence = np.asarray(sequence, dtype=np.int64) + diff = np_sequence[1] - np_sequence[0] if diff == 0: return sequence - elif len(sequence) == 2 or lib.is_sequence_range(np.asarray(sequence), diff): - return range(sequence[0], sequence[-1] + diff, diff) + elif len(sequence) == 2 or lib.is_sequence_range(np_sequence, diff): + return range(np_sequence[0], np_sequence[-1] + diff, diff) else: return sequence diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py index 4fbc84cd1a66c..a1968c6c694d5 100644 --- a/pandas/tests/frame/methods/test_set_index.py +++ b/pandas/tests/frame/methods/test_set_index.py @@ -148,7 +148,7 @@ def test_set_index_dst(self): def test_set_index(self, float_string_frame): df = float_string_frame - idx = Index(np.arange(len(df))[::-1]) + idx = Index(np.arange(len(df) - 1, -1, -1, dtype=np.int64)) df = df.set_index(idx) tm.assert_index_equal(df.index, idx) diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 9078ca865042d..0cc8018ea6213 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -513,7 +513,6 @@ def test_read_write_reread_dta14(self, file, parsed_114, version, 
datapath): written_and_read_again = self.read_dta(path) expected = parsed_114.copy() - expected.index = expected.index.astype(np.int32) tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) @pytest.mark.parametrize( @@ -576,7 +575,6 @@ def test_numeric_column_names(self): written_and_read_again.columns = map(convert_col_name, columns) expected = original - expected.index = expected.index.astype(np.int32) tm.assert_frame_equal(expected, written_and_read_again) @pytest.mark.parametrize("version", [114, 117, 118, 119, None]) @@ -594,7 +592,6 @@ def test_nan_to_missing_value(self, version): written_and_read_again = written_and_read_again.set_index("index") expected = original - expected.index = expected.index.astype(np.int32) tm.assert_frame_equal(written_and_read_again, expected) def test_no_index(self): @@ -617,7 +614,6 @@ def test_string_no_dates(self): written_and_read_again = self.read_dta(path) expected = original - expected.index = expected.index.astype(np.int32) tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) def test_large_value_conversion(self): @@ -637,7 +633,6 @@ def test_large_value_conversion(self): modified["s1"] = Series(modified["s1"], dtype=np.int16) modified["s2"] = Series(modified["s2"], dtype=np.int32) modified["s3"] = Series(modified["s3"], dtype=np.float64) - modified.index = original.index.astype(np.int32) tm.assert_frame_equal(written_and_read_again.set_index("index"), modified) def test_dates_invalid_column(self): @@ -713,7 +708,7 @@ def test_write_missing_strings(self): expected = DataFrame( [["1"], [""]], - index=pd.Index([0, 1], dtype=np.int32, name="index"), + index=pd.RangeIndex(2, name="index"), columns=["foo"], ) @@ -746,7 +741,6 @@ def test_bool_uint(self, byteorder, version): written_and_read_again = written_and_read_again.set_index("index") expected = original - expected.index = expected.index.astype(np.int32) expected_types = ( np.int8, np.int8, @@ -1030,7 +1024,7 @@ def 
test_categorical_writing(self, version): res = written_and_read_again.set_index("index") expected = original - expected.index = expected.index.set_names("index").astype(np.int32) + expected.index = expected.index.set_names("index") expected["incompletely_labeled"] = expected["incompletely_labeled"].apply(str) expected["unlabeled"] = expected["unlabeled"].apply(str) @@ -1094,7 +1088,6 @@ def test_categorical_with_stata_missing_values(self, version): new_cats = cat.remove_unused_categories().categories cat = cat.set_categories(new_cats, ordered=True) expected[col] = cat - expected.index = expected.index.astype(np.int32) tm.assert_frame_equal(res, expected) @pytest.mark.parametrize("file", ["stata10_115", "stata10_117"]) @@ -1544,7 +1537,6 @@ def test_out_of_range_float(self): original["ColumnTooBig"] = original["ColumnTooBig"].astype(np.float64) expected = original - expected.index = expected.index.astype(np.int32) tm.assert_frame_equal(reread.set_index("index"), expected) @pytest.mark.parametrize("infval", [np.inf, -np.inf]) @@ -1669,7 +1661,6 @@ def test_writer_117(self): original["int32"] = original["int32"].astype(np.int32) original["float32"] = Series(original["float32"], dtype=np.float32) original.index.name = "index" - original.index = original.index.astype(np.int32) copy = original.copy() with tm.ensure_clean() as path: original.to_stata( @@ -1962,7 +1953,7 @@ def test_read_write_ea_dtypes(self, dtype_backend): # stata stores with ms unit, so unit does not round-trip exactly "e": pd.date_range("2020-12-31", periods=3, freq="D", unit="ms"), }, - index=pd.Index([0, 1, 2], name="index", dtype=np.int32), + index=pd.RangeIndex(range(3), name="index"), ) tm.assert_frame_equal(written_and_read_again.set_index("index"), expected) @@ -2049,7 +2040,6 @@ def test_compression(compression, version, use_dict, infer, compression_to_exten reread = read_stata(fp, index_col="index") expected = df - expected.index = expected.index.astype(np.int32) tm.assert_frame_equal(reread, 
expected) @@ -2075,7 +2065,6 @@ def test_compression_dict(method, file_ext): reread = read_stata(fp, index_col="index") expected = df - expected.index = expected.index.astype(np.int32) tm.assert_frame_equal(reread, expected) @@ -2085,7 +2074,6 @@ def test_chunked_categorical(version): df.index.name = "index" expected = df.copy() - expected.index = expected.index.astype(np.int32) with tm.ensure_clean() as path: df.to_stata(path, version=version) @@ -2094,7 +2082,9 @@ def test_chunked_categorical(version): block = block.set_index("index") assert "cats" in block tm.assert_series_equal( - block.cats, expected.cats.iloc[2 * i : 2 * (i + 1)] + block.cats, + expected.cats.iloc[2 * i : 2 * (i + 1)], + check_index_type=len(block) > 1, ) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 1cd52ab1ae8b4..1a764cb505ead 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -2192,23 +2192,28 @@ def test_merge_on_indexes(self, how, sort, expected): @pytest.mark.parametrize( "index", - [Index([1, 2], dtype=dtyp, name="index_col") for dtyp in tm.ALL_REAL_NUMPY_DTYPES] + [ + Index([1, 2, 4], dtype=dtyp, name="index_col") + for dtyp in tm.ALL_REAL_NUMPY_DTYPES + ] + [ - CategoricalIndex(["A", "B"], categories=["A", "B"], name="index_col"), - RangeIndex(start=0, stop=2, name="index_col"), - DatetimeIndex(["2018-01-01", "2018-01-02"], name="index_col"), + CategoricalIndex(["A", "B", "C"], categories=["A", "B", "C"], name="index_col"), + RangeIndex(start=0, stop=3, name="index_col"), + DatetimeIndex(["2018-01-01", "2018-01-02", "2018-01-03"], name="index_col"), ], ids=lambda x: f"{type(x).__name__}[{x.dtype}]", ) def test_merge_index_types(index): # gh-20777 # assert key access is consistent across index types - left = DataFrame({"left_data": [1, 2]}, index=index) - right = DataFrame({"right_data": [1.0, 2.0]}, index=index) + left = DataFrame({"left_data": [1, 2, 3]}, index=index) + 
right = DataFrame({"right_data": [1.0, 2.0, 3.0]}, index=index) result = left.merge(right, on=["index_col"]) - expected = DataFrame({"left_data": [1, 2], "right_data": [1.0, 2.0]}, index=index) + expected = DataFrame( + {"left_data": [1, 2, 3], "right_data": [1.0, 2.0, 3.0]}, index=index + ) tm.assert_frame_equal(result, expected)
Discovered in https://github.com/pandas-dev/pandas/pull/57441
https://api.github.com/repos/pandas-dev/pandas/pulls/58016
2024-03-26T18:09:17Z
2024-04-01T18:21:15Z
2024-04-01T18:21:15Z
2024-04-04T01:43:59Z
DEPS: bump adbc-driver-sqlite min version to 0.10.0
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml index 1b68fa4fc22e6..f87b0ba6976a9 100644 --- a/ci/deps/actions-310.yaml +++ b/ci/deps/actions-310.yaml @@ -23,6 +23,8 @@ dependencies: - pytz # optional dependencies + - adbc-driver-postgresql>=0.10.0 + - adbc-driver-sqlite>=0.10.0 - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 @@ -57,7 +59,5 @@ dependencies: - zstandard>=0.19.0 - pip: - - adbc-driver-postgresql>=0.10.0 - - adbc-driver-sqlite>=0.8.0 - tzdata>=2022.7 - pytest-localserver>=0.7.1 diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml index 893e585cb890e..8c9c4b17971b7 100644 --- a/ci/deps/actions-311-downstream_compat.yaml +++ b/ci/deps/actions-311-downstream_compat.yaml @@ -25,6 +25,8 @@ dependencies: - pytz # optional dependencies + - adbc-driver-postgresql>=0.10.0 + - adbc-driver-sqlite>=0.10.0 - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 @@ -72,6 +74,4 @@ dependencies: - pyyaml - py - pip: - - adbc-driver-postgresql>=0.10.0 - - adbc-driver-sqlite>=0.8.0 - tzdata>=2022.7 diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml index 20124b24a6b9a..3ecd822b6b978 100644 --- a/ci/deps/actions-311.yaml +++ b/ci/deps/actions-311.yaml @@ -23,6 +23,8 @@ dependencies: - pytz # optional dependencies + - adbc-driver-postgresql>=0.10.0 + - adbc-driver-sqlite>=0.10.0 - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 @@ -57,6 +59,4 @@ dependencies: - zstandard>=0.19.0 - pip: - - adbc-driver-postgresql>=0.10.0 - - adbc-driver-sqlite>=0.8.0 - pytest-localserver>=0.7.1 diff --git a/ci/deps/actions-312.yaml b/ci/deps/actions-312.yaml index eb70816c241bb..087a757daf7aa 100644 --- a/ci/deps/actions-312.yaml +++ b/ci/deps/actions-312.yaml @@ -23,6 +23,8 @@ dependencies: - pytz # optional dependencies + - adbc-driver-postgresql>=0.10.0 + - adbc-driver-sqlite>=0.10.0 - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 @@ -57,7 +59,5 @@ dependencies: - 
zstandard>=0.19.0 - pip: - - adbc-driver-postgresql>=0.10.0 - - adbc-driver-sqlite>=0.8.0 - tzdata>=2022.7 - pytest-localserver>=0.7.1 diff --git a/ci/deps/actions-39-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml index 4399aa748af5c..a5bd2fda69316 100644 --- a/ci/deps/actions-39-minimum_versions.yaml +++ b/ci/deps/actions-39-minimum_versions.yaml @@ -26,6 +26,8 @@ dependencies: - pytz=2020.1 # optional dependencies + - adbc-driver-postgresql=0.10.0 + - adbc-driver-sqlite=0.10.0 - beautifulsoup4=4.11.2 - blosc=1.21.3 - bottleneck=1.3.6 @@ -60,6 +62,4 @@ dependencies: - zstandard=0.19.0 - pip: - - adbc-driver-postgresql==0.10.0 - - adbc-driver-sqlite==0.8.0 - tzdata==2022.7 diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 92df608f17c6c..654cbe25ef9e2 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -23,6 +23,8 @@ dependencies: - pytz # optional dependencies + - adbc-driver-postgresql>=0.10.0 + - adbc-driver-sqlite>=0.10.0 - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 @@ -57,7 +59,5 @@ dependencies: - zstandard>=0.19.0 - pip: - - adbc-driver-postgresql>=0.10.0 - - adbc-driver-sqlite>=0.8.0 - tzdata>=2022.7 - pytest-localserver>=0.7.1 diff --git a/ci/deps/circle-310-arm64.yaml b/ci/deps/circle-310-arm64.yaml index 869aae8596681..a81dd5b1d89a0 100644 --- a/ci/deps/circle-310-arm64.yaml +++ b/ci/deps/circle-310-arm64.yaml @@ -24,6 +24,8 @@ dependencies: - pytz # optional dependencies + - adbc-driver-postgresql>=0.10.0 + - adbc-driver-sqlite>=0.10.0 - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 @@ -56,6 +58,3 @@ dependencies: - xlrd>=2.0.1 - xlsxwriter>=3.0.5 - zstandard>=0.19.0 - - pip: - - adbc-driver-postgresql>=0.8.0 - - adbc-driver-sqlite>=0.8.0 diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 11c16dd9dabcc..00c7858f28633 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -347,7 
+347,7 @@ SQLAlchemy 2.0.0 postgresql, SQL support for dat psycopg2 2.9.6 postgresql PostgreSQL engine for sqlalchemy pymysql 1.0.2 mysql MySQL engine for sqlalchemy adbc-driver-postgresql 0.10.0 postgresql ADBC Driver for PostgreSQL -adbc-driver-sqlite 0.8.0 sql-other ADBC Driver for SQLite +adbc-driver-sqlite 0.10.0 sql-other ADBC Driver for SQLite ========================= ================== =============== ============================================================= Other data sources diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index fb33601263c5d..da32689fc9366 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -138,6 +138,8 @@ Optional libraries below the lowest tested version may still work, but are not c +------------------------+---------------------+ | adbc-driver-postgresql | 0.10.0 | +------------------------+---------------------+ +| adbc-driver-sqlite | 0.10.0 | ++------------------------+---------------------+ See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for more. 
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index d6e01a168fba1..8fa504efd7a11 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -21,7 +21,7 @@ VERSIONS = { "adbc-driver-postgresql": "0.10.0", - "adbc-driver-sqlite": "0.8.0", + "adbc-driver-sqlite": "0.10.0", "bs4": "4.11.2", "blosc": "1.21.3", "bottleneck": "1.3.6", diff --git a/pyproject.toml b/pyproject.toml index 84d6eca552b54..67a9f2d187c7d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -78,7 +78,7 @@ hdf5 = [# blosc only available on conda (https://github.com/Blosc/python-blosc/i spss = ['pyreadstat>=1.2.0'] postgresql = ['SQLAlchemy>=2.0.0', 'psycopg2>=2.9.6', 'adbc-driver-postgresql>=0.10.0'] mysql = ['SQLAlchemy>=2.0.0', 'pymysql>=1.0.2'] -sql-other = ['SQLAlchemy>=2.0.0', 'adbc-driver-postgresql>=0.10.0', 'adbc-driver-sqlite>=0.8.0'] +sql-other = ['SQLAlchemy>=2.0.0', 'adbc-driver-postgresql>=0.10.0', 'adbc-driver-sqlite>=0.10.0'] html = ['beautifulsoup4>=4.11.2', 'html5lib>=1.1', 'lxml>=4.9.2'] xml = ['lxml>=4.9.2'] plot = ['matplotlib>=3.6.3'] @@ -86,7 +86,7 @@ output-formatting = ['jinja2>=3.1.2', 'tabulate>=0.9.0'] clipboard = ['PyQt5>=5.15.9', 'qtpy>=2.3.0'] compression = ['zstandard>=0.19.0'] all = ['adbc-driver-postgresql>=0.10.0', - 'adbc-driver-sqlite>=0.8.0', + 'adbc-driver-sqlite>=0.10.0', 'beautifulsoup4>=4.11.2', # blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) #'blosc>=1.21.3',
xref https://github.com/pandas-dev/pandas/pull/58010 These are on conda-forge now, so might as well install it there: https://anaconda.org/conda-forge/adbc-driver-postgresql
https://api.github.com/repos/pandas-dev/pandas/pulls/58014
2024-03-26T17:58:37Z
2024-03-26T20:31:22Z
null
2024-03-26T20:31:24Z
Add tests for transform sum with series
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index 46f6367fbb3ed..ed7aa9d27e452 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -1490,3 +1490,47 @@ def test_idxmin_idxmax_transform_args(how, skipna, numeric_only): msg = f"DataFrameGroupBy.{how} with skipna=False encountered an NA value" with pytest.raises(ValueError, match=msg): gb.transform(how, skipna, numeric_only) + + +def test_transform_sum_one_column_no_matching_labels(): + df = DataFrame({"X": [1.0]}) + series = Series(["Y"]) + result = df.groupby(series, as_index=False).transform("sum") + expected = DataFrame({"X": [1.0]}) + tm.assert_frame_equal(result, expected) + + +def test_transform_sum_no_matching_labels(): + df = DataFrame({"X": [1.0, -93204, 4935]}) + series = Series(["A", "B", "C"]) + + result = df.groupby(series, as_index=False).transform("sum") + expected = DataFrame({"X": [1.0, -93204, 4935]}) + tm.assert_frame_equal(result, expected) + + +def test_transform_sum_one_column_with_matching_labels(): + df = DataFrame({"X": [1.0, -93204, 4935]}) + series = Series(["A", "B", "A"]) + + result = df.groupby(series, as_index=False).transform("sum") + expected = DataFrame({"X": [4936.0, -93204, 4936.0]}) + tm.assert_frame_equal(result, expected) + + +def test_transform_sum_one_column_with_missing_labels(): + df = DataFrame({"X": [1.0, -93204, 4935]}) + series = Series(["A", "C"]) + + result = df.groupby(series, as_index=False).transform("sum") + expected = DataFrame({"X": [1.0, -93204, np.nan]}) + tm.assert_frame_equal(result, expected) + + +def test_transform_sum_one_column_with_matching_labels_and_missing_labels(): + df = DataFrame({"X": [1.0, -93204, 4935]}) + series = Series(["A", "A"]) + + result = df.groupby(series, as_index=False).transform("sum") + expected = DataFrame({"X": [-93203.0, -93203.0, np.nan]}) + tm.assert_frame_equal(result, expected)
- [x] closes #37093 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58012
2024-03-26T09:56:19Z
2024-03-28T17:58:52Z
2024-03-28T17:58:52Z
2024-03-28T17:59:04Z
DEPR: enforce deprecation of non-standard argument to take
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 549d49aaa1853..547055082ced3 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -218,6 +218,7 @@ Removal of prior version deprecations/changes - Enforced deprecation of :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` for object-dtype (:issue:`57820`) - Enforced deprecation of :meth:`offsets.Tick.delta`, use ``pd.Timedelta(obj)`` instead (:issue:`55498`) - Enforced deprecation of ``axis=None`` acting the same as ``axis=0`` in the DataFrame reductions ``sum``, ``prod``, ``std``, ``var``, and ``sem``, passing ``axis=None`` will now reduce over both axes; this is particularly the case when doing e.g. ``numpy.sum(df)`` (:issue:`21597`) +- Enforced deprecation of non-standard (``np.ndarray``, :class:`ExtensionArray`, :class:`Index`, or :class:`Series`) argument to :func:`api.extensions.take` (:issue:`52981`) - Enforced deprecation of parsing system timezone strings to ``tzlocal``, which depended on system timezone, pass the 'tz' keyword instead (:issue:`50791`) - Enforced deprecation of passing a dictionary to :meth:`SeriesGroupBy.agg` (:issue:`52268`) - Enforced deprecation of string ``AS`` denoting frequency in :class:`YearBegin` and strings ``AS-DEC``, ``AS-JAN``, etc. denoting annual frequencies with various fiscal year starts (:issue:`57793`) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 8620aafd97528..6a6096567c65d 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -43,7 +43,6 @@ ensure_float64, ensure_object, ensure_platform_int, - is_array_like, is_bool_dtype, is_complex_dtype, is_dict_like, @@ -1163,28 +1162,30 @@ def take( """ if not isinstance(arr, (np.ndarray, ABCExtensionArray, ABCIndex, ABCSeries)): # GH#52981 - warnings.warn( - "pd.api.extensions.take accepting non-standard inputs is deprecated " - "and will raise in a future version. 
Pass either a numpy.ndarray, " - "ExtensionArray, Index, or Series instead.", - FutureWarning, - stacklevel=find_stack_level(), + raise TypeError( + "pd.api.extensions.take requires a numpy.ndarray, " + f"ExtensionArray, Index, or Series, got {type(arr).__name__}." ) - if not is_array_like(arr): - arr = np.asarray(arr) - indices = ensure_platform_int(indices) if allow_fill: # Pandas style, -1 means NA validate_indices(indices, arr.shape[axis]) + # error: Argument 1 to "take_nd" has incompatible type + # "ndarray[Any, Any] | ExtensionArray | Index | Series"; expected + # "ndarray[Any, Any]" result = take_nd( - arr, indices, axis=axis, allow_fill=True, fill_value=fill_value + arr, # type: ignore[arg-type] + indices, + axis=axis, + allow_fill=True, + fill_value=fill_value, ) else: # NumPy style - result = arr.take(indices, axis=axis) + # error: Unexpected keyword argument "axis" for "take" of "ExtensionArray" + result = arr.take(indices, axis=axis) # type: ignore[call-arg,assignment] return result diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py index 4f34ab34c35f0..ce2e4e0f6cec5 100644 --- a/pandas/tests/test_take.py +++ b/pandas/tests/test_take.py @@ -299,9 +299,11 @@ def test_take_na_empty(self): tm.assert_numpy_array_equal(result, expected) def test_take_coerces_list(self): + # GH#52981 coercing is deprecated, disabled in 3.0 arr = [1, 2, 3] - msg = "take accepting non-standard inputs is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = algos.take(arr, [0, 0]) - expected = np.array([1, 1]) - tm.assert_numpy_array_equal(result, expected) + msg = ( + "pd.api.extensions.take requires a numpy.ndarray, ExtensionArray, " + "Index, or Series, got list" + ) + with pytest.raises(TypeError, match=msg): + algos.take(arr, [0, 0])
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58011
2024-03-26T02:37:10Z
2024-03-28T17:57:15Z
2024-03-28T17:57:15Z
2024-03-28T20:08:57Z
DEPS: bump adbc-driver-postgresql min version to 0.10.0
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml index 85ee5230b31be..1b68fa4fc22e6 100644 --- a/ci/deps/actions-310.yaml +++ b/ci/deps/actions-310.yaml @@ -57,7 +57,7 @@ dependencies: - zstandard>=0.19.0 - pip: - - adbc-driver-postgresql>=0.8.0 + - adbc-driver-postgresql>=0.10.0 - adbc-driver-sqlite>=0.8.0 - tzdata>=2022.7 - pytest-localserver>=0.7.1 diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml index efd790d77afbb..893e585cb890e 100644 --- a/ci/deps/actions-311-downstream_compat.yaml +++ b/ci/deps/actions-311-downstream_compat.yaml @@ -72,6 +72,6 @@ dependencies: - pyyaml - py - pip: - - adbc-driver-postgresql>=0.8.0 + - adbc-driver-postgresql>=0.10.0 - adbc-driver-sqlite>=0.8.0 - tzdata>=2022.7 diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml index 535c260582eec..20124b24a6b9a 100644 --- a/ci/deps/actions-311.yaml +++ b/ci/deps/actions-311.yaml @@ -57,6 +57,6 @@ dependencies: - zstandard>=0.19.0 - pip: - - adbc-driver-postgresql>=0.8.0 + - adbc-driver-postgresql>=0.10.0 - adbc-driver-sqlite>=0.8.0 - pytest-localserver>=0.7.1 diff --git a/ci/deps/actions-312.yaml b/ci/deps/actions-312.yaml index 8b3f19f55e4b6..eb70816c241bb 100644 --- a/ci/deps/actions-312.yaml +++ b/ci/deps/actions-312.yaml @@ -57,7 +57,7 @@ dependencies: - zstandard>=0.19.0 - pip: - - adbc-driver-postgresql>=0.8.0 + - adbc-driver-postgresql>=0.10.0 - adbc-driver-sqlite>=0.8.0 - tzdata>=2022.7 - pytest-localserver>=0.7.1 diff --git a/ci/deps/actions-39-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml index 94cb21d1621b6..4399aa748af5c 100644 --- a/ci/deps/actions-39-minimum_versions.yaml +++ b/ci/deps/actions-39-minimum_versions.yaml @@ -60,6 +60,6 @@ dependencies: - zstandard=0.19.0 - pip: - - adbc-driver-postgresql==0.8.0 + - adbc-driver-postgresql==0.10.0 - adbc-driver-sqlite==0.8.0 - tzdata==2022.7 diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 
4cc9b1fbe2491..92df608f17c6c 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -57,7 +57,7 @@ dependencies: - zstandard>=0.19.0 - pip: - - adbc-driver-postgresql>=0.8.0 + - adbc-driver-postgresql>=0.10.0 - adbc-driver-sqlite>=0.8.0 - tzdata>=2022.7 - pytest-localserver>=0.7.1 diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 77e273d8c81fe..11c16dd9dabcc 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -346,7 +346,7 @@ SQLAlchemy 2.0.0 postgresql, SQL support for dat sql-other psycopg2 2.9.6 postgresql PostgreSQL engine for sqlalchemy pymysql 1.0.2 mysql MySQL engine for sqlalchemy -adbc-driver-postgresql 0.8.0 postgresql ADBC Driver for PostgreSQL +adbc-driver-postgresql 0.10.0 postgresql ADBC Driver for PostgreSQL adbc-driver-sqlite 0.8.0 sql-other ADBC Driver for SQLite ========================= ================== =============== ============================================================= diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index a398b93b60018..b538b5bef4eb0 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -129,11 +129,13 @@ For `optional libraries <https://pandas.pydata.org/docs/getting_started/install. The following table lists the lowest version per library that is currently being tested throughout the development of pandas. Optional libraries below the lowest tested version may still work, but are not considered supported. 
-+-----------------+---------------------+ -| Package | New Minimum Version | -+=================+=====================+ -| fastparquet | 2023.04.0 | -+-----------------+---------------------+ ++------------------------+---------------------+ +| Package | New Minimum Version | ++========================+=====================+ +| fastparquet | 2023.04.0 | ++------------------------+---------------------+ +| adbc-driver-postgresql | 0.10.0 | ++------------------------+---------------------+ See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for more. diff --git a/environment.yml b/environment.yml index e7bf2556d27f8..020154e650c5b 100644 --- a/environment.yml +++ b/environment.yml @@ -116,7 +116,7 @@ dependencies: - pygments # Code highlighting - pip: - - adbc-driver-postgresql>=0.8.0 + - adbc-driver-postgresql>=0.10.0 - adbc-driver-sqlite>=0.8.0 - typing_extensions; python_version<"3.11" - tzdata>=2022.7 diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index f9273ba4bbc62..d6e01a168fba1 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -20,7 +20,7 @@ # deps_minimum.toml & pyproject.toml when updating versions! 
VERSIONS = { - "adbc-driver-postgresql": "0.8.0", + "adbc-driver-postgresql": "0.10.0", "adbc-driver-sqlite": "0.8.0", "bs4": "4.11.2", "blosc": "1.21.3", diff --git a/pyproject.toml b/pyproject.toml index f96fbee4a5818..84d6eca552b54 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -76,16 +76,16 @@ hdf5 = [# blosc only available on conda (https://github.com/Blosc/python-blosc/i #'blosc>=1.20.1', 'tables>=3.8.0'] spss = ['pyreadstat>=1.2.0'] -postgresql = ['SQLAlchemy>=2.0.0', 'psycopg2>=2.9.6', 'adbc-driver-postgresql>=0.8.0'] +postgresql = ['SQLAlchemy>=2.0.0', 'psycopg2>=2.9.6', 'adbc-driver-postgresql>=0.10.0'] mysql = ['SQLAlchemy>=2.0.0', 'pymysql>=1.0.2'] -sql-other = ['SQLAlchemy>=2.0.0', 'adbc-driver-postgresql>=0.8.0', 'adbc-driver-sqlite>=0.8.0'] +sql-other = ['SQLAlchemy>=2.0.0', 'adbc-driver-postgresql>=0.10.0', 'adbc-driver-sqlite>=0.8.0'] html = ['beautifulsoup4>=4.11.2', 'html5lib>=1.1', 'lxml>=4.9.2'] xml = ['lxml>=4.9.2'] plot = ['matplotlib>=3.6.3'] output-formatting = ['jinja2>=3.1.2', 'tabulate>=0.9.0'] clipboard = ['PyQt5>=5.15.9', 'qtpy>=2.3.0'] compression = ['zstandard>=0.19.0'] -all = ['adbc-driver-postgresql>=0.8.0', +all = ['adbc-driver-postgresql>=0.10.0', 'adbc-driver-sqlite>=0.8.0', 'beautifulsoup4>=4.11.2', # blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) diff --git a/requirements-dev.txt b/requirements-dev.txt index 0cc064d2660bb..0ea0eba369158 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -84,7 +84,7 @@ feedparser pyyaml requests pygments -adbc-driver-postgresql>=0.8.0 +adbc-driver-postgresql>=0.10.0 adbc-driver-sqlite>=0.8.0 typing_extensions; python_version<"3.11" tzdata>=2022.7
Broken off from #55901
https://api.github.com/repos/pandas-dev/pandas/pulls/58010
2024-03-26T02:20:17Z
2024-03-26T17:04:29Z
2024-03-26T17:04:29Z
2024-03-26T17:07:35Z
DEPR: value_counts doing dtype inference on result.index
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index a398b93b60018..4fd2f46fc71fd 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -199,6 +199,7 @@ Removal of prior version deprecations/changes - All arguments in :meth:`Series.to_dict` are now keyword only (:issue:`56493`) - Changed the default value of ``observed`` in :meth:`DataFrame.groupby` and :meth:`Series.groupby` to ``True`` (:issue:`51811`) - Enforced deprecation disallowing parsing datetimes with mixed time zones unless user passes ``utc=True`` to :func:`to_datetime` (:issue:`57275`) +- Enforced deprecation in :meth:`Series.value_counts` and :meth:`Index.value_counts` with object dtype performing dtype inference on the ``.index`` of the result (:issue:`56161`) - Enforced deprecation of :meth:`.DataFrameGroupBy.get_group` and :meth:`.SeriesGroupBy.get_group` allowing the ``name`` argument to be a non-tuple when grouping by a list of length 1 (:issue:`54155`) - Enforced deprecation of :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` for object-dtype (:issue:`57820`) - Enforced deprecation of ``axis=None`` acting the same as ``axis=0`` in the DataFrame reductions ``sum``, ``prod``, ``std``, ``var``, and ``sem``, passing ``axis=None`` will now reduce over both axes; this is particularly the case when doing e.g. ``numpy.sum(df)`` (:issue:`21597`) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 344314d829c19..8620aafd97528 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -892,26 +892,9 @@ def value_counts_internal( if keys.dtype == np.float16: keys = keys.astype(np.float32) - # For backwards compatibility, we let Index do its normal type - # inference, _except_ for if if infers from object to bool. 
- idx = Index(keys) - if idx.dtype == bool and keys.dtype == object: - idx = idx.astype(object) - elif ( - idx.dtype != keys.dtype # noqa: PLR1714 # # pylint: disable=R1714 - and idx.dtype != "string[pyarrow_numpy]" - ): - warnings.warn( - # GH#56161 - "The behavior of value_counts with object-dtype is deprecated. " - "In a future version, this will *not* perform dtype inference " - "on the resulting index. To retain the old behavior, use " - "`result.index = result.index.infer_objects()`", - FutureWarning, - stacklevel=find_stack_level(), - ) - idx.name = index_name - + # Starting in 3.0, we no longer perform dtype inference on the + # Index object we construct here, xref GH#56161 + idx = Index(keys, dtype=keys.dtype, name=index_name) result = Series(counts, index=idx, name=name, copy=False) if sort: @@ -1606,16 +1589,8 @@ def union_with_duplicates( """ from pandas import Series - with warnings.catch_warnings(): - # filter warning from object dtype inference; we will end up discarding - # the index here, so the deprecation does not affect the end result here. 
- warnings.filterwarnings( - "ignore", - "The behavior of value_counts with object-dtype is deprecated", - category=FutureWarning, - ) - l_count = value_counts_internal(lvals, dropna=False) - r_count = value_counts_internal(rvals, dropna=False) + l_count = value_counts_internal(lvals, dropna=False) + r_count = value_counts_internal(rvals, dropna=False) l_count, r_count = l_count.align(r_count, fill_value=0) final_count = np.maximum(l_count.values, r_count.values) final_count = Series(final_count, index=l_count.index, dtype="int", copy=False) diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 56ea28c0b50f8..af666a591b1bc 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -13,7 +13,6 @@ Union, overload, ) -import warnings import numpy as np @@ -1217,15 +1216,8 @@ def value_counts(self, dropna: bool = True) -> Series: Series.value_counts """ # TODO: implement this is a non-naive way! - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - "The behavior of value_counts with object-dtype is deprecated", - category=FutureWarning, - ) - result = value_counts(np.asarray(self), dropna=dropna) - # Once the deprecation is enforced, we will need to do - # `result.index = result.index.astype(self.dtype)` + result = value_counts(np.asarray(self), dropna=dropna) + result.index = result.index.astype(self.dtype) return result # --------------------------------------------------------------------- diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py index a0b0bdfdb46d8..ac40e48f3d523 100644 --- a/pandas/tests/base/test_value_counts.py +++ b/pandas/tests/base/test_value_counts.py @@ -347,9 +347,8 @@ def test_value_counts_object_inference_deprecated(): dti = pd.date_range("2016-01-01", periods=3, tz="UTC") idx = dti.astype(object) - msg = "The behavior of value_counts with object-dtype is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - 
res = idx.value_counts() + res = idx.value_counts() exp = dti.value_counts() + exp.index = exp.index.astype(object) tm.assert_series_equal(res, exp)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58009
2024-03-26T02:04:00Z
2024-03-26T17:20:19Z
2024-03-26T17:20:19Z
2024-03-26T17:46:41Z
Backport PR #57553 on branch 2.2.x (API: avoid passing Manager to subclass init)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5c510d98596df..afcd4d014316e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -656,26 +656,37 @@ class DataFrame(NDFrame, OpsMixin): def _constructor(self) -> Callable[..., DataFrame]: return DataFrame - def _constructor_from_mgr(self, mgr, axes): - if self._constructor is DataFrame: - # we are pandas.DataFrame (or a subclass that doesn't override _constructor) - return DataFrame._from_mgr(mgr, axes=axes) - else: - assert axes is mgr.axes + def _constructor_from_mgr(self, mgr, axes) -> DataFrame: + df = DataFrame._from_mgr(mgr, axes=axes) + + if type(self) is DataFrame: + # This would also work `if self._constructor is DataFrame`, but + # this check is slightly faster, benefiting the most-common case. + return df + + elif type(self).__name__ == "GeoDataFrame": + # Shim until geopandas can override their _constructor_from_mgr + # bc they have different behavior for Managers than for DataFrames return self._constructor(mgr) + # We assume that the subclass __init__ knows how to handle a + # pd.DataFrame object. + return self._constructor(df) + _constructor_sliced: Callable[..., Series] = Series - def _sliced_from_mgr(self, mgr, axes) -> Series: - return Series._from_mgr(mgr, axes) + def _constructor_sliced_from_mgr(self, mgr, axes) -> Series: + ser = Series._from_mgr(mgr, axes) + ser._name = None # caller is responsible for setting real name - def _constructor_sliced_from_mgr(self, mgr, axes): - if self._constructor_sliced is Series: - ser = self._sliced_from_mgr(mgr, axes) - ser._name = None # caller is responsible for setting real name + if type(self) is DataFrame: + # This would also work `if self._constructor_sliced is Series`, but + # this check is slightly faster, benefiting the most-common case. return ser - assert axes is mgr.axes - return self._constructor_sliced(mgr) + + # We assume that the subclass __init__ knows how to handle a + # pd.Series object. 
+ return self._constructor_sliced(ser) # ---------------------------------------------------------------------- # Constructors @@ -1403,7 +1414,8 @@ def _get_values_for_csv( na_rep=na_rep, quoting=quoting, ) - return self._constructor_from_mgr(mgr, axes=mgr.axes) + # error: Incompatible return value type (got "DataFrame", expected "Self") + return self._constructor_from_mgr(mgr, axes=mgr.axes) # type: ignore[return-value] # ---------------------------------------------------------------------- @@ -5077,7 +5089,8 @@ def predicate(arr: ArrayLike) -> bool: return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) - return self._constructor_from_mgr(mgr, axes=mgr.axes).__finalize__(self) + # error: Incompatible return value type (got "DataFrame", expected "Self") + return self._constructor_from_mgr(mgr, axes=mgr.axes).__finalize__(self) # type: ignore[return-value] def insert( self, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2a86f75badecd..796357355fef4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -336,6 +336,7 @@ def _as_manager(self, typ: str, copy: bool_t = True) -> Self: # fastpath of passing a manager doesn't check the option/manager class return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self) + @final @classmethod def _from_mgr(cls, mgr: Manager, axes: list[Index]) -> Self: """ diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 2d430ef4dcff6..0dd808a0ab296 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -2548,7 +2548,8 @@ def _take_new_index( if axis == 1: raise NotImplementedError("axis 1 is not supported") new_mgr = obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1) - return obj._constructor_from_mgr(new_mgr, axes=new_mgr.axes) + # error: Incompatible return value type (got "DataFrame", expected "NDFrameT") + return obj._constructor_from_mgr(new_mgr, axes=new_mgr.axes) # type: ignore[return-value] else: raise 
ValueError("'obj' should be either a Series or a DataFrame") diff --git a/pandas/core/series.py b/pandas/core/series.py index c1782206d4b67..6fd019656d207 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -662,14 +662,17 @@ def _constructor(self) -> Callable[..., Series]: return Series def _constructor_from_mgr(self, mgr, axes): - if self._constructor is Series: - # we are pandas.Series (or a subclass that doesn't override _constructor) - ser = Series._from_mgr(mgr, axes=axes) - ser._name = None # caller is responsible for setting real name + ser = Series._from_mgr(mgr, axes=axes) + ser._name = None # caller is responsible for setting real name + + if type(self) is Series: + # This would also work `if self._constructor is Series`, but + # this check is slightly faster, benefiting the most-common case. return ser - else: - assert axes is mgr.axes - return self._constructor(mgr) + + # We assume that the subclass __init__ knows how to handle a + # pd.Series object. + return self._constructor(ser) @property def _constructor_expanddim(self) -> Callable[..., DataFrame]: @@ -681,18 +684,19 @@ def _constructor_expanddim(self) -> Callable[..., DataFrame]: return DataFrame - def _expanddim_from_mgr(self, mgr, axes) -> DataFrame: + def _constructor_expanddim_from_mgr(self, mgr, axes): from pandas.core.frame import DataFrame - return DataFrame._from_mgr(mgr, axes=mgr.axes) + df = DataFrame._from_mgr(mgr, axes=mgr.axes) - def _constructor_expanddim_from_mgr(self, mgr, axes): - from pandas.core.frame import DataFrame + if type(self) is Series: + # This would also work `if self._constructor_expanddim is DataFrame`, + # but this check is slightly faster, benefiting the most-common case. + return df - if self._constructor_expanddim is DataFrame: - return self._expanddim_from_mgr(mgr, axes) - assert axes is mgr.axes - return self._constructor_expanddim(mgr) + # We assume that the subclass __init__ knows how to handle a + # pd.DataFrame object. 
+ return self._constructor_expanddim(df) # types @property diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index ef78ae62cb4d6..855b58229cbdb 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -26,6 +26,17 @@ def _constructor(self): class TestDataFrameSubclassing: + def test_no_warning_on_mgr(self): + # GH#57032 + df = tm.SubclassedDataFrame( + {"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"] + ) + with tm.assert_produces_warning(None): + # df.isna() goes through _constructor_from_mgr, which we want to + # *not* pass a Manager do __init__ + df.isna() + df["X"].isna() + def test_frame_subclassing_and_slicing(self): # Subclass frame and ensure it returns the right class on slicing it # In reference to PR 9632
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58008
2024-03-26T01:49:10Z
2024-04-01T18:22:03Z
2024-04-01T18:22:03Z
2024-04-01T20:45:12Z
CLN: remove unnecessary check `needs_i8_conversion` if Index subclass does not support `any` or `all`
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 84b62563605ac..34ca81e36cbc5 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -1697,7 +1697,7 @@ def pyarrow_meth(data, skip_nulls, **kwargs): except (AttributeError, NotImplementedError, TypeError) as err: msg = ( f"'{type(self).__name__}' with dtype {self.dtype} " - f"does not support reduction '{name}' with pyarrow " + f"does not support operation '{name}' with pyarrow " f"version {pa.__version__}. '{name}' may be supported by " f"upgrading pyarrow." ) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 76615704f2e33..f37d96bd37614 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1886,7 +1886,7 @@ def _reduce( Raises ------ - TypeError : subclass does not define reductions + TypeError : subclass does not define operations Examples -------- @@ -1897,7 +1897,7 @@ def _reduce( if meth is None: raise TypeError( f"'{type(self).__name__}' with dtype {self.dtype} " - f"does not support reduction '{name}'" + f"does not support operation '{name}'" ) result = meth(skipna=skipna, **kwargs) if keepdims: diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 52cb175ca79a2..d46810e6ebbdd 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1662,7 +1662,7 @@ def _groupby_op( if dtype.kind == "M": # Adding/multiplying datetimes is not valid if how in ["any", "all", "sum", "prod", "cumsum", "cumprod", "var", "skew"]: - raise TypeError(f"datetime64 type does not support operation: '{how}'") + raise TypeError(f"datetime64 type does not support operation '{how}'") elif isinstance(dtype, PeriodDtype): # Adding/multiplying Periods is not valid diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 30cf6f0b866ee..fd2a65f9b3289 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -176,7 
+176,6 @@ ) from pandas.core.missing import clean_reindex_fill_method from pandas.core.ops import get_op_result_name -from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( ensure_key_mapped, get_group_index_sorter, @@ -6938,14 +6937,8 @@ def _maybe_disable_logical_methods(self, opname: str_t) -> None: """ raise if this Index subclass does not support any or all. """ - if ( - isinstance(self, ABCMultiIndex) - # TODO(3.0): PeriodArray and DatetimeArray any/all will raise, - # so checking needs_i8_conversion will be unnecessary - or (needs_i8_conversion(self.dtype) and self.dtype.kind != "m") - ): - # This call will raise - make_invalid_op(opname)(self) + if isinstance(self, ABCMultiIndex): + raise TypeError(f"cannot perform {opname} with {type(self).__name__}") @Appender(IndexOpsMixin.argmin.__doc__) def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index a124e8679ae8e..d0c8d17042741 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -520,7 +520,7 @@ def nanany( if values.dtype.kind == "M": # GH#34479 - raise TypeError("datetime64 type does not support operation: 'any'") + raise TypeError("datetime64 type does not support operation 'any'") values, _ = _get_values(values, skipna, fill_value=False, mask=mask) @@ -576,7 +576,7 @@ def nanall( if values.dtype.kind == "M": # GH#34479 - raise TypeError("datetime64 type does not support operation: 'all'") + raise TypeError("datetime64 type does not support operation 'all'") values, _ = _get_values(values, skipna, fill_value=True, mask=mask) diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py index 9f3fee686a056..de5f5cac1282c 100644 --- a/pandas/tests/apply/test_frame_apply.py +++ b/pandas/tests/apply/test_frame_apply.py @@ -1209,7 +1209,7 @@ def test_agg_multiple_mixed_raises(): ) # sorted index - msg = "does not support reduction" + msg = "does not 
support operation" with pytest.raises(TypeError, match=msg): mdf.agg(["min", "sum"]) @@ -1309,7 +1309,7 @@ def test_nuiscance_columns(): ) tm.assert_frame_equal(result, expected) - msg = "does not support reduction" + msg = "does not support operation" with pytest.raises(TypeError, match=msg): df.agg("sum") @@ -1317,7 +1317,7 @@ def test_nuiscance_columns(): expected = Series([6, 6.0, "foobarbaz"], index=["A", "B", "C"]) tm.assert_series_equal(result, expected) - msg = "does not support reduction" + msg = "does not support operation" with pytest.raises(TypeError, match=msg): df.agg(["sum"]) diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index 8778df832d4d7..dbc6cc7715744 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -374,14 +374,14 @@ def test_numeric_like_ops(self): # min/max) s = df["value_group"] for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]: - msg = f"does not support reduction '{op}'" + msg = f"does not support operation '{op}'" with pytest.raises(TypeError, match=msg): getattr(s, op)(numeric_only=False) def test_numeric_like_ops_series(self): # numpy ops s = Series(Categorical([1, 2, 3, 4])) - with pytest.raises(TypeError, match="does not support reduction 'sum'"): + with pytest.raises(TypeError, match="does not support operation 'sum'"): np.sum(s) @pytest.mark.parametrize( diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 971c5bf487104..cfc04b5c91354 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -247,7 +247,7 @@ def test_scalar_from_string(self, arr1d): assert result == arr1d[0] def test_reduce_invalid(self, arr1d): - msg = "does not support reduction 'not a method'" + msg = "does not support operation 'not a method'" with pytest.raises(TypeError, match=msg): arr1d._reduce("not a method") 
diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py index dcbbac44d083a..bab8566a06dc2 100644 --- a/pandas/tests/extension/base/groupby.py +++ b/pandas/tests/extension/base/groupby.py @@ -165,7 +165,7 @@ def test_in_numeric_groupby(self, data_for_grouping): # period "does not support sum operations", # datetime - "does not support operation: 'sum'", + "does not support operation 'sum'", # all others re.escape(f"agg function failed [how->sum,dtype->{dtype}"), ] diff --git a/pandas/tests/extension/base/reduce.py b/pandas/tests/extension/base/reduce.py index 03952d87f0ac6..c3a6daee2dd54 100644 --- a/pandas/tests/extension/base/reduce.py +++ b/pandas/tests/extension/base/reduce.py @@ -86,7 +86,7 @@ def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna): # TODO: the message being checked here isn't actually checking anything msg = ( "[Cc]annot perform|Categorical is not ordered for operation|" - "does not support reduction|" + "does not support operation|" ) with pytest.raises(TypeError, match=msg): @@ -105,7 +105,7 @@ def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna): # TODO: the message being checked here isn't actually checking anything msg = ( "[Cc]annot perform|Categorical is not ordered for operation|" - "does not support reduction|" + "does not support operation|" ) with pytest.raises(TypeError, match=msg): diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index 5de4865feb6f9..a42fa6088d9c8 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -104,7 +104,7 @@ def _supports_reduction(self, obj, op_name: str) -> bool: @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna): meth = all_boolean_reductions - msg = f"datetime64 type does not support operation: '{meth}'" + msg = f"datetime64 type does not support operation 
'{meth}'" with pytest.raises(TypeError, match=msg): super().test_reduce_series_boolean(data, all_boolean_reductions, skipna) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index fd3dad37da1f9..c1161a258aaa4 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -975,7 +975,7 @@ def test_sum_mixed_datetime(self): df = DataFrame({"A": date_range("2000", periods=4), "B": [1, 2, 3, 4]}).reindex( [2, 3, 4] ) - with pytest.raises(TypeError, match="does not support reduction 'sum'"): + with pytest.raises(TypeError, match="does not support operation 'sum'"): df.sum() def test_mean_corner(self, float_frame, float_string_frame): @@ -1381,7 +1381,7 @@ def test_any_datetime(self): ] df = DataFrame({"A": float_data, "B": datetime_data}) - msg = "datetime64 type does not support operation: 'any'" + msg = "datetime64 type does not support operation 'any'" with pytest.raises(TypeError, match=msg): df.any(axis=1) @@ -1466,18 +1466,18 @@ def test_any_all_np_func(self, func, data, expected): if any(isinstance(x, CategoricalDtype) for x in data.dtypes): with pytest.raises( - TypeError, match="dtype category does not support reduction" + TypeError, match=".* dtype category does not support operation" ): func(data) # method version with pytest.raises( - TypeError, match="dtype category does not support reduction" + TypeError, match=".* dtype category does not support operation" ): getattr(DataFrame(data), func.__name__)(axis=None) if data.dtypes.apply(lambda x: x.kind == "M").any(): # GH#34479 - msg = "datetime64 type does not support operation: '(any|all)'" + msg = "datetime64 type does not support operation '(any|all)'" with pytest.raises(TypeError, match=msg): func(data) @@ -1734,19 +1734,19 @@ def test_any_all_categorical_dtype_nuisance_column(self, all_boolean_reductions) df = ser.to_frame() # Double-check the Series behavior is to raise - with pytest.raises(TypeError, match="does not support 
reduction"): + with pytest.raises(TypeError, match="does not support operation"): getattr(ser, all_boolean_reductions)() - with pytest.raises(TypeError, match="does not support reduction"): + with pytest.raises(TypeError, match="does not support operation"): getattr(np, all_boolean_reductions)(ser) - with pytest.raises(TypeError, match="does not support reduction"): + with pytest.raises(TypeError, match="does not support operation"): getattr(df, all_boolean_reductions)(bool_only=False) - with pytest.raises(TypeError, match="does not support reduction"): + with pytest.raises(TypeError, match="does not support operation"): getattr(df, all_boolean_reductions)(bool_only=None) - with pytest.raises(TypeError, match="does not support reduction"): + with pytest.raises(TypeError, match="does not support operation"): getattr(np, all_boolean_reductions)(df, axis=0) def test_median_categorical_dtype_nuisance_column(self): @@ -1755,22 +1755,22 @@ def test_median_categorical_dtype_nuisance_column(self): ser = df["A"] # Double-check the Series behavior is to raise - with pytest.raises(TypeError, match="does not support reduction"): + with pytest.raises(TypeError, match="does not support operation"): ser.median() - with pytest.raises(TypeError, match="does not support reduction"): + with pytest.raises(TypeError, match="does not support operation"): df.median(numeric_only=False) - with pytest.raises(TypeError, match="does not support reduction"): + with pytest.raises(TypeError, match="does not support operation"): df.median() # same thing, but with an additional non-categorical column df["B"] = df["A"].astype(int) - with pytest.raises(TypeError, match="does not support reduction"): + with pytest.raises(TypeError, match="does not support operation"): df.median(numeric_only=False) - with pytest.raises(TypeError, match="does not support reduction"): + with pytest.raises(TypeError, match="does not support operation"): df.median() # TODO: np.median(df, axis=0) gives np.array([2.0, 2.0]) 
instead @@ -1964,7 +1964,7 @@ def test_minmax_extensionarray(method, numeric_only): def test_frame_mixed_numeric_object_with_timestamp(ts_value): # GH 13912 df = DataFrame({"a": [1], "b": [1.1], "c": ["foo"], "d": [ts_value]}) - with pytest.raises(TypeError, match="does not support reduction"): + with pytest.raises(TypeError, match="does not support operation"): df.sum() diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index be8f5d73fe7e8..54d7895691f3f 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -671,7 +671,7 @@ def test_raises_on_nuisance(df): df = df.loc[:, ["A", "C", "D"]] df["E"] = datetime.now() grouped = df.groupby("A") - msg = "datetime64 type does not support operation: 'sum'" + msg = "datetime64 type does not support operation 'sum'" with pytest.raises(TypeError, match=msg): grouped.agg("sum") with pytest.raises(TypeError, match=msg): @@ -1794,7 +1794,7 @@ def get_categorical_invalid_expected(): else: msg = "category type does not support" if op == "skew": - msg = "|".join([msg, "does not support reduction 'skew'"]) + msg = "|".join([msg, "does not support operation 'skew'"]) with pytest.raises(TypeError, match=msg): get_result() diff --git a/pandas/tests/groupby/test_raises.py b/pandas/tests/groupby/test_raises.py index 7af27d7227035..70be98af1289f 100644 --- a/pandas/tests/groupby/test_raises.py +++ b/pandas/tests/groupby/test_raises.py @@ -241,16 +241,16 @@ def test_groupby_raises_datetime( return klass, msg = { - "all": (TypeError, "datetime64 type does not support operation: 'all'"), - "any": (TypeError, "datetime64 type does not support operation: 'any'"), + "all": (TypeError, "datetime64 type does not support operation 'all'"), + "any": (TypeError, "datetime64 type does not support operation 'any'"), "bfill": (None, ""), "corrwith": (TypeError, "cannot perform __mul__ with this index type"), "count": (None, ""), "cumcount": (None, ""), "cummax": (None, ""), 
"cummin": (None, ""), - "cumprod": (TypeError, "datetime64 type does not support operation: 'cumprod'"), - "cumsum": (TypeError, "datetime64 type does not support operation: 'cumsum'"), + "cumprod": (TypeError, "datetime64 type does not support operation 'cumprod'"), + "cumsum": (TypeError, "datetime64 type does not support operation 'cumsum'"), "diff": (None, ""), "ffill": (None, ""), "fillna": (None, ""), @@ -265,7 +265,7 @@ def test_groupby_raises_datetime( "ngroup": (None, ""), "nunique": (None, ""), "pct_change": (TypeError, "cannot perform __truediv__ with this index type"), - "prod": (TypeError, "datetime64 type does not support operation: 'prod'"), + "prod": (TypeError, "datetime64 type does not support operation 'prod'"), "quantile": (None, ""), "rank": (None, ""), "sem": (None, ""), @@ -275,14 +275,14 @@ def test_groupby_raises_datetime( TypeError, "|".join( [ - r"dtype datetime64\[ns\] does not support reduction", - "datetime64 type does not support operation: 'skew'", + r"dtype datetime64\[ns\] does not support operation", + "datetime64 type does not support operation 'skew'", ] ), ), "std": (None, ""), - "sum": (TypeError, "datetime64 type does not support operation: 'sum"), - "var": (TypeError, "datetime64 type does not support operation: 'var'"), + "sum": (TypeError, "datetime64 type does not support operation 'sum"), + "var": (TypeError, "datetime64 type does not support operation 'var'"), }[groupby_func] if groupby_func == "fillna": @@ -323,7 +323,7 @@ def test_groupby_raises_datetime_np( klass, msg = { np.sum: ( TypeError, - re.escape("datetime64[us] does not support reduction 'sum'"), + re.escape("datetime64[us] does not support operation 'sum'"), ), np.mean: (None, ""), }[groupby_func_np] @@ -417,7 +417,7 @@ def test_groupby_raises_category( TypeError, "|".join( [ - "'Categorical' .* does not support reduction 'mean'", + "'Categorical' .* does not support operation 'mean'", "category dtype does not support aggregation 'mean'", ] ), @@ -426,7 
+426,7 @@ def test_groupby_raises_category( TypeError, "|".join( [ - "'Categorical' .* does not support reduction 'median'", + "'Categorical' .* does not support operation 'median'", "category dtype does not support aggregation 'median'", ] ), @@ -445,7 +445,7 @@ def test_groupby_raises_category( TypeError, "|".join( [ - "'Categorical' .* does not support reduction 'sem'", + "'Categorical' .* does not support operation 'sem'", "category dtype does not support aggregation 'sem'", ] ), @@ -456,7 +456,7 @@ def test_groupby_raises_category( TypeError, "|".join( [ - "dtype category does not support reduction 'skew'", + "dtype category does not support operation 'skew'", "category type does not support skew operations", ] ), @@ -465,7 +465,7 @@ def test_groupby_raises_category( TypeError, "|".join( [ - "'Categorical' .* does not support reduction 'std'", + "'Categorical' .* does not support operation 'std'", "category dtype does not support aggregation 'std'", ] ), @@ -475,7 +475,7 @@ def test_groupby_raises_category( TypeError, "|".join( [ - "'Categorical' .* does not support reduction 'var'", + "'Categorical' .* does not support operation 'var'", "category dtype does not support aggregation 'var'", ] ), @@ -519,10 +519,10 @@ def test_groupby_raises_category_np( gb = gb["d"] klass, msg = { - np.sum: (TypeError, "dtype category does not support reduction 'sum'"), + np.sum: (TypeError, "dtype category does not support operation 'sum'"), np.mean: ( TypeError, - "dtype category does not support reduction 'mean'", + "dtype category does not support operation 'mean'", ), }[groupby_func_np] _call_and_check(klass, msg, how, gb, groupby_func_np, ()) @@ -618,7 +618,7 @@ def test_groupby_raises_category_on_category( TypeError, "|".join( [ - "'Categorical' .* does not support reduction 'sem'", + "'Categorical' .* does not support operation 'sem'", "category dtype does not support aggregation 'sem'", ] ), @@ -630,7 +630,7 @@ def test_groupby_raises_category_on_category( "|".join( [ 
"category type does not support skew operations", - "dtype category does not support reduction 'skew'", + "dtype category does not support operation 'skew'", ] ), ), @@ -638,7 +638,7 @@ def test_groupby_raises_category_on_category( TypeError, "|".join( [ - "'Categorical' .* does not support reduction 'std'", + "'Categorical' .* does not support operation 'std'", "category dtype does not support aggregation 'std'", ] ), @@ -648,7 +648,7 @@ def test_groupby_raises_category_on_category( TypeError, "|".join( [ - "'Categorical' .* does not support reduction 'var'", + "'Categorical' .* does not support operation 'var'", "category dtype does not support aggregation 'var'", ] ), diff --git a/pandas/tests/indexes/test_old_base.py b/pandas/tests/indexes/test_old_base.py index f41c6870cdb1c..871e7cdda4102 100644 --- a/pandas/tests/indexes/test_old_base.py +++ b/pandas/tests/indexes/test_old_base.py @@ -222,12 +222,7 @@ def test_logical_compat(self, simple_index): assert idx.any() == idx._values.any() assert idx.any() == idx.to_series().any() else: - msg = "cannot perform (any|all)" - if isinstance(idx, IntervalIndex): - msg = ( - r"'IntervalArray' with dtype interval\[.*\] does " - "not support reduction '(any|all)'" - ) + msg = "does not support operation '(any|all)'" with pytest.raises(TypeError, match=msg): idx.all() with pytest.raises(TypeError, match=msg): diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index 048553330c1ce..5547b716b2670 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -378,7 +378,7 @@ def test_invalid_td64_reductions(self, opname): [ f"reduction operation '{opname}' not allowed for this dtype", rf"cannot perform {opname} with type timedelta64\[ns\]", - f"does not support reduction '{opname}'", + f"does not support operation '{opname}'", ] ) @@ -714,7 +714,7 @@ def test_ops_consistency_on_empty(self, method): [ "operation 'var' not allowed", 
r"cannot perform var with type timedelta64\[ns\]", - "does not support reduction 'var'", + "does not support operation 'var'", ] ) with pytest.raises(TypeError, match=msg): @@ -1010,7 +1010,7 @@ def test_any_all_datetimelike(self): df = DataFrame(ser) # GH#34479 - msg = "datetime64 type does not support operation: '(any|all)'" + msg = "datetime64 type does not support operation '(any|all)'" with pytest.raises(TypeError, match=msg): dta.all() with pytest.raises(TypeError, match=msg): diff --git a/pandas/tests/reductions/test_stat_reductions.py b/pandas/tests/reductions/test_stat_reductions.py index 60fcf8cbc142c..4af1ca1d4800a 100644 --- a/pandas/tests/reductions/test_stat_reductions.py +++ b/pandas/tests/reductions/test_stat_reductions.py @@ -99,7 +99,7 @@ def _check_stat_op( # mean, idxmax, idxmin, min, and max are valid for dates if name not in ["max", "min", "mean", "median", "std"]: ds = Series(date_range("1/1/2001", periods=10)) - msg = f"does not support reduction '{name}'" + msg = f"does not support operation '{name}'" with pytest.raises(TypeError, match=msg): f(ds) diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py index 9b442fa7dbd07..a77097fd5ce61 100644 --- a/pandas/tests/resample/test_resample_api.py +++ b/pandas/tests/resample/test_resample_api.py @@ -709,7 +709,7 @@ def test_selection_api_validation(): exp.index.name = "d" with pytest.raises( - TypeError, match="datetime64 type does not support operation: 'sum'" + TypeError, match="datetime64 type does not support operation 'sum'" ): df.resample("2D", level="d").sum() result = df.resample("2D", level="d").sum(numeric_only=True) diff --git a/pandas/tests/series/test_ufunc.py b/pandas/tests/series/test_ufunc.py index 94a6910509e2d..36a2afb2162c2 100644 --- a/pandas/tests/series/test_ufunc.py +++ b/pandas/tests/series/test_ufunc.py @@ -289,7 +289,7 @@ def test_multiply(self, values_for_np_reduce, box_with_array, request): else: msg = "|".join( [ - "does 
not support reduction", + "does not support operation", "unsupported operand type", "ufunc 'multiply' cannot use operands", ] @@ -319,7 +319,7 @@ def test_add(self, values_for_np_reduce, box_with_array): else: msg = "|".join( [ - "does not support reduction", + "does not support operation", "unsupported operand type", "ufunc 'add' cannot use operands", ]
xref #54566 removed unnecessary check needs_i8_conversion if Index subclass does not support any or all.
https://api.github.com/repos/pandas-dev/pandas/pulls/58006
2024-03-25T22:56:32Z
2024-04-04T16:47:01Z
2024-04-04T16:47:01Z
2024-04-04T16:47:08Z
DEPR: remove Tick.delta
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index a9967dcb8efe6..77778e8bbd859 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -1022,7 +1022,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.tseries.offsets.DateOffset.rule_code GL08" \ -i "pandas.tseries.offsets.Day PR02" \ -i "pandas.tseries.offsets.Day.copy SA01" \ - -i "pandas.tseries.offsets.Day.delta GL08" \ -i "pandas.tseries.offsets.Day.freqstr SA01" \ -i "pandas.tseries.offsets.Day.is_on_offset GL08" \ -i "pandas.tseries.offsets.Day.kwds SA01" \ @@ -1075,7 +1074,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.tseries.offsets.FY5253Quarter.year_has_extra_week GL08" \ -i "pandas.tseries.offsets.Hour PR02" \ -i "pandas.tseries.offsets.Hour.copy SA01" \ - -i "pandas.tseries.offsets.Hour.delta GL08" \ -i "pandas.tseries.offsets.Hour.freqstr SA01" \ -i "pandas.tseries.offsets.Hour.is_on_offset GL08" \ -i "pandas.tseries.offsets.Hour.kwds SA01" \ @@ -1098,7 +1096,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.tseries.offsets.LastWeekOfMonth.weekday GL08" \ -i "pandas.tseries.offsets.Micro PR02" \ -i "pandas.tseries.offsets.Micro.copy SA01" \ - -i "pandas.tseries.offsets.Micro.delta GL08" \ -i "pandas.tseries.offsets.Micro.freqstr SA01" \ -i "pandas.tseries.offsets.Micro.is_on_offset GL08" \ -i "pandas.tseries.offsets.Micro.kwds SA01" \ @@ -1109,7 +1106,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.tseries.offsets.Micro.rule_code GL08" \ -i "pandas.tseries.offsets.Milli PR02" \ -i "pandas.tseries.offsets.Milli.copy SA01" \ - -i "pandas.tseries.offsets.Milli.delta GL08" \ -i "pandas.tseries.offsets.Milli.freqstr SA01" \ -i "pandas.tseries.offsets.Milli.is_on_offset GL08" \ -i "pandas.tseries.offsets.Milli.kwds SA01" \ @@ -1120,7 +1116,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.tseries.offsets.Milli.rule_code GL08" \ -i "pandas.tseries.offsets.Minute PR02" \ -i 
"pandas.tseries.offsets.Minute.copy SA01" \ - -i "pandas.tseries.offsets.Minute.delta GL08" \ -i "pandas.tseries.offsets.Minute.freqstr SA01" \ -i "pandas.tseries.offsets.Minute.is_on_offset GL08" \ -i "pandas.tseries.offsets.Minute.kwds SA01" \ @@ -1151,7 +1146,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.tseries.offsets.MonthEnd.rule_code GL08" \ -i "pandas.tseries.offsets.Nano PR02" \ -i "pandas.tseries.offsets.Nano.copy SA01" \ - -i "pandas.tseries.offsets.Nano.delta GL08" \ -i "pandas.tseries.offsets.Nano.freqstr SA01" \ -i "pandas.tseries.offsets.Nano.is_on_offset GL08" \ -i "pandas.tseries.offsets.Nano.kwds SA01" \ @@ -1184,7 +1178,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.tseries.offsets.QuarterEnd.startingMonth GL08" \ -i "pandas.tseries.offsets.Second PR02" \ -i "pandas.tseries.offsets.Second.copy SA01" \ - -i "pandas.tseries.offsets.Second.delta GL08" \ -i "pandas.tseries.offsets.Second.freqstr SA01" \ -i "pandas.tseries.offsets.Second.is_on_offset GL08" \ -i "pandas.tseries.offsets.Second.kwds SA01" \ @@ -1217,7 +1210,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.tseries.offsets.SemiMonthEnd.rule_code GL08" \ -i "pandas.tseries.offsets.Tick GL08" \ -i "pandas.tseries.offsets.Tick.copy SA01" \ - -i "pandas.tseries.offsets.Tick.delta GL08" \ -i "pandas.tseries.offsets.Tick.freqstr SA01" \ -i "pandas.tseries.offsets.Tick.is_on_offset GL08" \ -i "pandas.tseries.offsets.Tick.kwds SA01" \ diff --git a/doc/source/reference/offset_frequency.rst b/doc/source/reference/offset_frequency.rst index 37eff247899be..8bb2c6ffe73be 100644 --- a/doc/source/reference/offset_frequency.rst +++ b/doc/source/reference/offset_frequency.rst @@ -1042,7 +1042,6 @@ Properties .. autosummary:: :toctree: api/ - Tick.delta Tick.freqstr Tick.kwds Tick.name @@ -1077,7 +1076,6 @@ Properties .. autosummary:: :toctree: api/ - Day.delta Day.freqstr Day.kwds Day.name @@ -1112,7 +1110,6 @@ Properties .. 
autosummary:: :toctree: api/ - Hour.delta Hour.freqstr Hour.kwds Hour.name @@ -1147,7 +1144,6 @@ Properties .. autosummary:: :toctree: api/ - Minute.delta Minute.freqstr Minute.kwds Minute.name @@ -1182,7 +1178,6 @@ Properties .. autosummary:: :toctree: api/ - Second.delta Second.freqstr Second.kwds Second.name @@ -1217,7 +1212,6 @@ Properties .. autosummary:: :toctree: api/ - Milli.delta Milli.freqstr Milli.kwds Milli.name @@ -1252,7 +1246,6 @@ Properties .. autosummary:: :toctree: api/ - Micro.delta Micro.freqstr Micro.kwds Micro.name @@ -1287,7 +1280,6 @@ Properties .. autosummary:: :toctree: api/ - Nano.delta Nano.freqstr Nano.kwds Nano.name diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index a398b93b60018..8b3d4fe8ff5e1 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -201,6 +201,7 @@ Removal of prior version deprecations/changes - Enforced deprecation disallowing parsing datetimes with mixed time zones unless user passes ``utc=True`` to :func:`to_datetime` (:issue:`57275`) - Enforced deprecation of :meth:`.DataFrameGroupBy.get_group` and :meth:`.SeriesGroupBy.get_group` allowing the ``name`` argument to be a non-tuple when grouping by a list of length 1 (:issue:`54155`) - Enforced deprecation of :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` for object-dtype (:issue:`57820`) +- Enforced deprecation of :meth:`offsets.Tick.delta`, use ``pd.Timedelta(obj)`` instead (:issue:`55498`) - Enforced deprecation of ``axis=None`` acting the same as ``axis=0`` in the DataFrame reductions ``sum``, ``prod``, ``std``, ``var``, and ``sem``, passing ``axis=None`` will now reduce over both axes; this is particularly the case when doing e.g. 
``numpy.sum(df)`` (:issue:`21597`) - Enforced deprecation of parsing system timezone strings to ``tzlocal``, which depended on system timezone, pass the 'tz' keyword instead (:issue:`50791`) - Enforced deprecation of passing a dictionary to :meth:`SeriesGroupBy.agg` (:issue:`52268`) diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi index 791ebc0fbb245..3f942d6aa3622 100644 --- a/pandas/_libs/tslibs/offsets.pyi +++ b/pandas/_libs/tslibs/offsets.pyi @@ -20,8 +20,6 @@ from pandas._typing import ( npt, ) -from .timedeltas import Timedelta - _BaseOffsetT = TypeVar("_BaseOffsetT", bound=BaseOffset) _DatetimeT = TypeVar("_DatetimeT", bound=datetime) _TimedeltaT = TypeVar("_TimedeltaT", bound=timedelta) @@ -114,8 +112,6 @@ class Tick(SingleConstructorOffset): _prefix: str def __init__(self, n: int = ..., normalize: bool = ...) -> None: ... @property - def delta(self) -> Timedelta: ... - @property def nanos(self) -> int: ... def delta_to_tick(delta: timedelta) -> Tick: ... diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index fd18ae5908f10..e36abdf0ad971 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -957,22 +957,6 @@ cdef class Tick(SingleConstructorOffset): def _as_pd_timedelta(self): return Timedelta(self) - @property - def delta(self): - warnings.warn( - # GH#55498 - f"{type(self).__name__}.delta is deprecated and will be removed in " - "a future version. 
Use pd.Timedelta(obj) instead", - FutureWarning, - stacklevel=find_stack_level(), - ) - try: - return self.n * Timedelta(self._nanos_inc) - except OverflowError as err: - # GH#55503 as_unit will raise a more useful OutOfBoundsTimedelta - Timedelta(self).as_unit("ns") - raise AssertionError("This should not be reached.") - @property def nanos(self) -> int64_t: """ diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index c8fbdfa11991a..f91230e1460c4 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -16,7 +16,6 @@ import pytest from pandas._libs.tslibs.offsets import delta_to_tick -from pandas.errors import OutOfBoundsTimedelta from pandas import ( Timedelta, @@ -239,16 +238,6 @@ def test_tick_addition(kls, expected): assert result == expected -def test_tick_delta_overflow(): - # GH#55503 raise OutOfBoundsTimedelta, not OverflowError - tick = offsets.Day(10**9) - msg = "Cannot cast 1000000000 days 00:00:00 to unit='ns' without overflow" - depr_msg = "Day.delta is deprecated" - with pytest.raises(OutOfBoundsTimedelta, match=msg): - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - tick.delta - - @pytest.mark.parametrize("cls", tick_classes) def test_tick_division(cls): off = cls(10)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58005
2024-03-25T20:56:35Z
2024-03-26T17:03:05Z
2024-03-26T17:03:05Z
2024-03-26T17:12:03Z
DEPR: remove DTA.__init__, TDA.__init__
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml index 1b68fa4fc22e6..ed7dfe1a3c17e 100644 --- a/ci/deps/actions-310.yaml +++ b/ci/deps/actions-310.yaml @@ -26,7 +26,7 @@ dependencies: - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 - - fastparquet>=2023.04.0 + - fastparquet>=2023.10.0 - fsspec>=2022.11.0 - html5lib>=1.1 - hypothesis>=6.46.1 diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml index 893e585cb890e..dd1d341c70a9b 100644 --- a/ci/deps/actions-311-downstream_compat.yaml +++ b/ci/deps/actions-311-downstream_compat.yaml @@ -28,7 +28,7 @@ dependencies: - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 - - fastparquet>=2023.04.0 + - fastparquet>=2023.10.0 - fsspec>=2022.11.0 - html5lib>=1.1 - hypothesis>=6.46.1 diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml index 20124b24a6b9a..388116439f944 100644 --- a/ci/deps/actions-311.yaml +++ b/ci/deps/actions-311.yaml @@ -26,7 +26,7 @@ dependencies: - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 - - fastparquet>=2023.04.0 + - fastparquet>=2023.10.0 - fsspec>=2022.11.0 - html5lib>=1.1 - hypothesis>=6.46.1 diff --git a/ci/deps/actions-312.yaml b/ci/deps/actions-312.yaml index eb70816c241bb..745b2fc5dfd2e 100644 --- a/ci/deps/actions-312.yaml +++ b/ci/deps/actions-312.yaml @@ -26,7 +26,7 @@ dependencies: - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 - - fastparquet>=2023.04.0 + - fastparquet>=2023.10.0 - fsspec>=2022.11.0 - html5lib>=1.1 - hypothesis>=6.46.1 diff --git a/ci/deps/actions-39-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml index 4399aa748af5c..b760f27a3d4d3 100644 --- a/ci/deps/actions-39-minimum_versions.yaml +++ b/ci/deps/actions-39-minimum_versions.yaml @@ -29,7 +29,7 @@ dependencies: - beautifulsoup4=4.11.2 - blosc=1.21.3 - bottleneck=1.3.6 - - fastparquet=2023.04.0 + - fastparquet=2023.10.0 - fsspec=2022.11.0 - html5lib=1.1 - hypothesis=6.46.1 diff 
--git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 92df608f17c6c..8f235a836bb3d 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -26,7 +26,7 @@ dependencies: - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 - - fastparquet>=2023.04.0 + - fastparquet>=2023.10.0 - fsspec>=2022.11.0 - html5lib>=1.1 - hypothesis>=6.46.1 diff --git a/ci/deps/circle-310-arm64.yaml b/ci/deps/circle-310-arm64.yaml index 869aae8596681..ed4d139714e71 100644 --- a/ci/deps/circle-310-arm64.yaml +++ b/ci/deps/circle-310-arm64.yaml @@ -27,7 +27,7 @@ dependencies: - beautifulsoup4>=4.11.2 - blosc>=1.21.3 - bottleneck>=1.3.6 - - fastparquet>=2023.04.0 + - fastparquet>=2023.10.0 - fsspec>=2022.11.0 - html5lib>=1.1 - hypothesis>=6.46.1 diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 11c16dd9dabcc..3cd9e030d6b3c 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -361,7 +361,7 @@ Dependency Minimum Version pip extra Notes PyTables 3.8.0 hdf5 HDF5-based reading / writing blosc 1.21.3 hdf5 Compression for HDF5; only available on ``conda`` zlib hdf5 Compression for HDF5 -fastparquet 2023.04.0 - Parquet reading / writing (pyarrow is default) +fastparquet 2023.10.0 - Parquet reading / writing (pyarrow is default) pyarrow 10.0.1 parquet, feather Parquet, ORC, and feather reading / writing pyreadstat 1.2.0 spss SPSS files (.sav) reading odfpy 1.4.1 excel Open document format (.odf, .ods, .odt) reading / writing diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index fb33601263c5d..295d3d36a9c26 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -134,7 +134,7 @@ Optional libraries below the lowest tested version may still work, but are not c +------------------------+---------------------+ | Package | New Minimum Version | +========================+=====================+ -| fastparquet | 2023.04.0 | +| 
fastparquet | 2023.10.0 | +------------------------+---------------------+ | adbc-driver-postgresql | 0.10.0 | +------------------------+---------------------+ diff --git a/environment.yml b/environment.yml index 020154e650c5b..186d7e1d703df 100644 --- a/environment.yml +++ b/environment.yml @@ -30,7 +30,7 @@ dependencies: - beautifulsoup4>=4.11.2 - blosc - bottleneck>=1.3.6 - - fastparquet>=2023.04.0 + - fastparquet>=2023.10.0 - fsspec>=2022.11.0 - html5lib>=1.1 - hypothesis>=6.46.1 diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index d6e01a168fba1..f4e717c26d6fd 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -25,7 +25,7 @@ "bs4": "4.11.2", "blosc": "1.21.3", "bottleneck": "1.3.6", - "fastparquet": "2023.04.0", + "fastparquet": "2023.10.0", "fsspec": "2022.11.0", "html5lib": "1.1", "hypothesis": "6.46.1", diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 745774b34a3ad..3dc2d77bb5a19 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -26,7 +26,6 @@ algos, lib, ) -from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, IncompatibleFrequency, @@ -1936,100 +1935,6 @@ class TimelikeOps(DatetimeLikeArrayMixin): Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex. """ - _default_dtype: np.dtype - - def __init__( - self, values, dtype=None, freq=lib.no_default, copy: bool = False - ) -> None: - warnings.warn( - # GH#55623 - f"{type(self).__name__}.__init__ is deprecated and will be " - "removed in a future version. 
Use pd.array instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - if dtype is not None: - dtype = pandas_dtype(dtype) - - values = extract_array(values, extract_numpy=True) - if isinstance(values, IntegerArray): - values = values.to_numpy("int64", na_value=iNaT) - - inferred_freq = getattr(values, "_freq", None) - explicit_none = freq is None - freq = freq if freq is not lib.no_default else None - - if isinstance(values, type(self)): - if explicit_none: - # don't inherit from values - pass - elif freq is None: - freq = values.freq - elif freq and values.freq: - freq = to_offset(freq) - freq = _validate_inferred_freq(freq, values.freq) - - if dtype is not None and dtype != values.dtype: - # TODO: we only have tests for this for DTA, not TDA (2022-07-01) - raise TypeError( - f"dtype={dtype} does not match data dtype {values.dtype}" - ) - - dtype = values.dtype - values = values._ndarray - - elif dtype is None: - if isinstance(values, np.ndarray) and values.dtype.kind in "Mm": - dtype = values.dtype - else: - dtype = self._default_dtype - if isinstance(values, np.ndarray) and values.dtype == "i8": - values = values.view(dtype) - - if not isinstance(values, np.ndarray): - raise ValueError( - f"Unexpected type '{type(values).__name__}'. 'values' must be a " - f"{type(self).__name__}, ndarray, or Series or Index " - "containing one of those." - ) - if values.ndim not in [1, 2]: - raise ValueError("Only 1-dimensional input arrays are supported.") - - if values.dtype == "i8": - # for compat with datetime/timedelta/period shared methods, - # we can sometimes get here with int64 values. 
These represent - # nanosecond UTC (or tz-naive) unix timestamps - if dtype is None: - dtype = self._default_dtype - values = values.view(self._default_dtype) - elif lib.is_np_dtype(dtype, "mM"): - values = values.view(dtype) - elif isinstance(dtype, DatetimeTZDtype): - kind = self._default_dtype.kind - new_dtype = f"{kind}8[{dtype.unit}]" - values = values.view(new_dtype) - - dtype = self._validate_dtype(values, dtype) - - if freq == "infer": - raise ValueError( - f"Frequency inference not allowed in {type(self).__name__}.__init__. " - "Use 'pd.array()' instead." - ) - - if copy: - values = values.copy() - if freq: - freq = to_offset(freq) - if values.dtype.kind == "m" and not isinstance(freq, Tick): - raise TypeError("TimedeltaArray/Index freq must be a Tick") - - NDArrayBacked.__init__(self, values=values, dtype=dtype) - self._freq = freq - - if inferred_freq is None and freq is not None: - type(self)._validate_frequency(self, freq) - @classmethod def _validate_dtype(cls, values, dtype): raise AbstractMethodError(cls) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index ad4611aac9e35..d446407ec3d01 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -186,7 +186,7 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps): # type: ignore[misc] Parameters ---------- - values : Series, Index, DatetimeArray, ndarray + data : Series, Index, DatetimeArray, ndarray The datetime data. 
For DatetimeArray `values` (or a Series or Index boxing one), @@ -287,7 +287,6 @@ def _scalar_type(self) -> type[Timestamp]: _dtype: np.dtype[np.datetime64] | DatetimeTZDtype _freq: BaseOffset | None = None - _default_dtype = DT64NS_DTYPE # used in TimeLikeOps.__init__ @classmethod def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self: diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index c41e078095feb..6eb4d234b349d 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -113,7 +113,7 @@ class TimedeltaArray(dtl.TimelikeOps): Parameters ---------- - values : array-like + data : array-like The timedelta data. dtype : numpy.dtype @@ -196,7 +196,6 @@ def dtype(self) -> np.dtype[np.timedelta64]: # type: ignore[override] # Constructors _freq = None - _default_dtype = TD64NS_DTYPE # used in TimeLikeOps.__init__ @classmethod def _validate_dtype(cls, values, dtype): diff --git a/pandas/tests/arrays/datetimes/test_constructors.py b/pandas/tests/arrays/datetimes/test_constructors.py index 3d22427d41985..d7264c002c67f 100644 --- a/pandas/tests/arrays/datetimes/test_constructors.py +++ b/pandas/tests/arrays/datetimes/test_constructors.py @@ -16,34 +16,6 @@ def test_from_sequence_invalid_type(self): with pytest.raises(TypeError, match="Cannot create a DatetimeArray"): DatetimeArray._from_sequence(mi, dtype="M8[ns]") - def test_only_1dim_accepted(self): - arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]") - - depr_msg = "DatetimeArray.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match="Only 1-dimensional"): - # 3-dim, we allow 2D to sneak in for ops purposes GH#29853 - DatetimeArray(arr.reshape(2, 2, 1)) - - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match="Only 1-dimensional"): - # 0-dim - DatetimeArray(arr[[0]].squeeze()) - - def test_freq_validation(self): - # 
GH#24623 check that invalid instances cannot be created with the - # public constructor - arr = np.arange(5, dtype=np.int64) * 3600 * 10**9 - - msg = ( - "Inferred frequency h from passed values does not " - "conform to passed frequency W-SUN" - ) - depr_msg = "DatetimeArray.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match=msg): - DatetimeArray(arr, freq="W") - @pytest.mark.parametrize( "meth", [ @@ -76,42 +48,9 @@ def test_from_pandas_array(self): expected = pd.date_range("1970-01-01", periods=5, freq="h")._data tm.assert_datetime_array_equal(result, expected) - def test_mismatched_timezone_raises(self): - depr_msg = "DatetimeArray.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - arr = DatetimeArray( - np.array(["2000-01-01T06:00:00"], dtype="M8[ns]"), - dtype=DatetimeTZDtype(tz="US/Central"), - ) - dtype = DatetimeTZDtype(tz="US/Eastern") - msg = r"dtype=datetime64\[ns.*\] does not match data dtype datetime64\[ns.*\]" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(TypeError, match=msg): - DatetimeArray(arr, dtype=dtype) - - # also with mismatched tzawareness - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(TypeError, match=msg): - DatetimeArray(arr, dtype=np.dtype("M8[ns]")) - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(TypeError, match=msg): - DatetimeArray(arr.tz_localize(None), dtype=arr.dtype) - - def test_non_array_raises(self): - depr_msg = "DatetimeArray.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match="list"): - DatetimeArray([1, 2, 3]) - def test_bool_dtype_raises(self): arr = np.array([1, 2, 3], dtype="bool") - depr_msg = "DatetimeArray.__init__ is deprecated" - msg = "Unexpected value for 'dtype': 'bool'. 
Must be" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match=msg): - DatetimeArray(arr) - msg = r"dtype bool cannot be converted to datetime64\[ns\]" with pytest.raises(TypeError, match=msg): DatetimeArray._from_sequence(arr, dtype="M8[ns]") @@ -122,41 +61,6 @@ def test_bool_dtype_raises(self): with pytest.raises(TypeError, match=msg): pd.to_datetime(arr) - def test_incorrect_dtype_raises(self): - depr_msg = "DatetimeArray.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): - DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="category") - - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): - DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="m8[s]") - - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): - DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="M8[D]") - - def test_mismatched_values_dtype_units(self): - arr = np.array([1, 2, 3], dtype="M8[s]") - dtype = np.dtype("M8[ns]") - msg = "Values resolution does not match dtype." 
- depr_msg = "DatetimeArray.__init__ is deprecated" - - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match=msg): - DatetimeArray(arr, dtype=dtype) - - dtype2 = DatetimeTZDtype(tz="UTC", unit="ns") - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match=msg): - DatetimeArray(arr, dtype=dtype2) - - def test_freq_infer_raises(self): - depr_msg = "DatetimeArray.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match="Frequency inference"): - DatetimeArray(np.array([1, 2, 3], dtype="i8"), freq="infer") - def test_copy(self): data = np.array([1, 2, 3], dtype="M8[ns]") arr = DatetimeArray._from_sequence(data, copy=False) diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index b6ae1a9df0e65..971c5bf487104 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -1320,12 +1320,6 @@ def test_from_pandas_array(dtype): cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype] - depr_msg = f"{cls.__name__}.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - result = cls(arr) - expected = cls(data) - tm.assert_extension_array_equal(result, expected) - result = cls._from_sequence(arr, dtype=dtype) expected = cls._from_sequence(data, dtype=dtype) tm.assert_extension_array_equal(result, expected) diff --git a/pandas/tests/arrays/timedeltas/test_constructors.py b/pandas/tests/arrays/timedeltas/test_constructors.py index 91b6f7fa222f9..ee29f505fd7b1 100644 --- a/pandas/tests/arrays/timedeltas/test_constructors.py +++ b/pandas/tests/arrays/timedeltas/test_constructors.py @@ -1,45 +1,10 @@ import numpy as np import pytest -import pandas._testing as tm from pandas.core.arrays import TimedeltaArray class TestTimedeltaArrayConstructor: - def test_only_1dim_accepted(self): - # 
GH#25282 - arr = np.array([0, 1, 2, 3], dtype="m8[h]").astype("m8[ns]") - - depr_msg = "TimedeltaArray.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match="Only 1-dimensional"): - # 3-dim, we allow 2D to sneak in for ops purposes GH#29853 - TimedeltaArray(arr.reshape(2, 2, 1)) - - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match="Only 1-dimensional"): - # 0-dim - TimedeltaArray(arr[[0]].squeeze()) - - def test_freq_validation(self): - # ensure that the public constructor cannot create an invalid instance - arr = np.array([0, 0, 1], dtype=np.int64) * 3600 * 10**9 - - msg = ( - "Inferred frequency None from passed values does not " - "conform to passed frequency D" - ) - depr_msg = "TimedeltaArray.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match=msg): - TimedeltaArray(arr.view("timedelta64[ns]"), freq="D") - - def test_non_array_raises(self): - depr_msg = "TimedeltaArray.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match="list"): - TimedeltaArray([1, 2, 3]) - def test_other_type_raises(self): msg = r"dtype bool cannot be converted to timedelta64\[ns\]" with pytest.raises(TypeError, match=msg): @@ -78,16 +43,6 @@ def test_incorrect_dtype_raises(self): np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("m8[Y]") ) - def test_mismatched_values_dtype_units(self): - arr = np.array([1, 2, 3], dtype="m8[s]") - dtype = np.dtype("m8[ns]") - msg = r"Values resolution does not match dtype" - depr_msg = "TimedeltaArray.__init__ is deprecated" - - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - with pytest.raises(ValueError, match=msg): - TimedeltaArray(arr, dtype=dtype) - def test_copy(self): data = np.array([1, 2, 3], dtype="m8[ns]") arr = TimedeltaArray._from_sequence(data, 
copy=False) diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py index 2f97ab6be8965..895ea110c8ad5 100644 --- a/pandas/tests/indexes/timedeltas/test_constructors.py +++ b/pandas/tests/indexes/timedeltas/test_constructors.py @@ -56,7 +56,6 @@ def test_infer_from_tdi_mismatch(self): # has one and it does not match the `freq` input tdi = timedelta_range("1 second", periods=100, freq="1s") - depr_msg = "TimedeltaArray.__init__ is deprecated" msg = ( "Inferred frequency .* from passed values does " "not conform to passed frequency" @@ -64,18 +63,9 @@ def test_infer_from_tdi_mismatch(self): with pytest.raises(ValueError, match=msg): TimedeltaIndex(tdi, freq="D") - with pytest.raises(ValueError, match=msg): - # GH#23789 - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - TimedeltaArray(tdi, freq="D") - with pytest.raises(ValueError, match=msg): TimedeltaIndex(tdi._data, freq="D") - with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - TimedeltaArray(tdi._data, freq="D") - def test_dt64_data_invalid(self): # GH#23539 # passing tz-aware DatetimeIndex raises, naive or ndarray[datetime64] @@ -240,11 +230,6 @@ def test_explicit_none_freq(self): result = TimedeltaIndex(tdi._data, freq=None) assert result.freq is None - msg = "TimedeltaArray.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - tda = TimedeltaArray(tdi, freq=None) - assert tda.freq is None - def test_from_categorical(self): tdi = timedelta_range(1, periods=5) diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index a4fd29878a2d1..ee26fdae74960 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -20,10 +20,6 @@ TimedeltaIndex, ) import pandas._testing as tm -from pandas.core.arrays import ( - DatetimeArray, - TimedeltaArray, -) @pytest.fixture @@ -284,14 +280,6 @@ def 
test_from_obscure_array(dtype, box): else: data = box(arr) - cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype] - - depr_msg = f"{cls.__name__}.__init__ is deprecated" - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - expected = cls(arr) - result = cls._from_sequence(data, dtype=dtype) - tm.assert_extension_array_equal(result, expected) - if not isinstance(data, memoryview): # FIXME(GH#44431) these raise on memoryview and attempted fix # fails on py3.10 diff --git a/pyproject.toml b/pyproject.toml index 84d6eca552b54..5f5b013ca8461 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -91,7 +91,7 @@ all = ['adbc-driver-postgresql>=0.10.0', # blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) #'blosc>=1.21.3', 'bottleneck>=1.3.6', - 'fastparquet>=2023.04.0', + 'fastparquet>=2023.10.0', 'fsspec>=2022.11.0', 'gcsfs>=2022.11.0', 'html5lib>=1.1', diff --git a/requirements-dev.txt b/requirements-dev.txt index 0ea0eba369158..a42ee1587961a 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -19,7 +19,7 @@ pytz beautifulsoup4>=4.11.2 blosc bottleneck>=1.3.6 -fastparquet>=2023.04.0 +fastparquet>=2023.10.0 fsspec>=2022.11.0 html5lib>=1.1 hypothesis>=6.46.1
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58004
2024-03-25T20:55:49Z
2024-03-26T20:32:57Z
2024-03-26T20:32:57Z
2024-03-26T22:35:00Z
DEPR: enforce deprecation of DTI/TDI unused keywords
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index a9967dcb8efe6..b3a694de20103 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -504,7 +504,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.Timedelta.to_timedelta64 SA01" \ -i "pandas.Timedelta.total_seconds SA01" \ -i "pandas.Timedelta.view SA01" \ - -i "pandas.TimedeltaIndex PR01" \ -i "pandas.TimedeltaIndex.as_unit RT03,SA01" \ -i "pandas.TimedeltaIndex.ceil SA01" \ -i "pandas.TimedeltaIndex.components SA01" \ diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 4d2381ae1e5e4..5cb8c3c0f54d1 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -195,6 +195,8 @@ Removal of prior version deprecations/changes - :meth:`SeriesGroupBy.agg` no longer pins the name of the group to the input passed to the provided ``func`` (:issue:`51703`) - All arguments except ``name`` in :meth:`Index.rename` are now keyword only (:issue:`56493`) - All arguments except the first ``path``-like argument in IO writers are now keyword only (:issue:`54229`) +- Removed the "closed" and "normalize" keywords in :meth:`DatetimeIndex.__new__` (:issue:`52628`) +- Removed the "closed" and "unit" keywords in :meth:`TimedeltaIndex.__new__` (:issue:`52628`, :issue:`55499`) - All arguments in :meth:`Index.sort_values` are now keyword only (:issue:`56493`) - All arguments in :meth:`Series.to_dict` are now keyword only (:issue:`56493`) - Changed the default value of ``observed`` in :meth:`DataFrame.groupby` and :meth:`Series.groupby` to ``True`` (:issue:`51811`) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 2d773c04b8ea9..cefdc14145d1f 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -28,7 +28,6 @@ cache_readonly, doc, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_scalar from pandas.core.dtypes.dtypes import DatetimeTZDtype @@ 
-150,17 +149,6 @@ class DatetimeIndex(DatetimeTimedeltaMixin): inferred frequency upon creation. tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str Set the Timezone of the data. - normalize : bool, default False - Normalize start/end dates to midnight before generating date range. - - .. deprecated:: 2.1.0 - - closed : {'left', 'right'}, optional - Set whether to include `start` and `end` that are on the - boundary. The default includes boundary points on either end. - - .. deprecated:: 2.1.0 - ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 @@ -322,8 +310,6 @@ def __new__( data=None, freq: Frequency | lib.NoDefault = lib.no_default, tz=lib.no_default, - normalize: bool | lib.NoDefault = lib.no_default, - closed=lib.no_default, ambiguous: TimeAmbiguous = "raise", dayfirst: bool = False, yearfirst: bool = False, @@ -331,23 +317,6 @@ def __new__( copy: bool = False, name: Hashable | None = None, ) -> Self: - if closed is not lib.no_default: - # GH#52628 - warnings.warn( - f"The 'closed' keyword in {cls.__name__} construction is " - "deprecated and will be removed in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) - if normalize is not lib.no_default: - # GH#52628 - warnings.warn( - f"The 'normalize' keyword in {cls.__name__} construction is " - "deprecated and will be removed in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) - if is_scalar(data): cls._raise_scalar_data_error(data) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 6a2c04b0ddf51..8af5a56f43c57 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -3,7 +3,6 @@ from __future__ import annotations from typing import TYPE_CHECKING -import warnings from pandas._libs import ( index as libindex, @@ -14,8 +13,6 @@ Timedelta, 
to_offset, ) -from pandas._libs.tslibs.timedeltas import disallow_ambiguous_unit -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_scalar, @@ -63,12 +60,6 @@ class TimedeltaIndex(DatetimeTimedeltaMixin): ---------- data : array-like (1-dimensional), optional Optional timedelta-like data to construct index with. - unit : {'D', 'h', 'm', 's', 'ms', 'us', 'ns'}, optional - The unit of ``data``. - - .. deprecated:: 2.2.0 - Use ``pd.to_timedelta`` instead. - freq : str or pandas offset object, optional One of pandas date offset strings or corresponding objects. The string ``'infer'`` can be passed in order to set the frequency of the index as @@ -151,40 +142,16 @@ def _resolution_obj(self) -> Resolution | None: # type: ignore[override] def __new__( cls, data=None, - unit=lib.no_default, freq=lib.no_default, - closed=lib.no_default, dtype=None, copy: bool = False, name=None, ): - if closed is not lib.no_default: - # GH#52628 - warnings.warn( - f"The 'closed' keyword in {cls.__name__} construction is " - "deprecated and will be removed in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) - - if unit is not lib.no_default: - # GH#55499 - warnings.warn( - f"The 'unit' keyword in {cls.__name__} construction is " - "deprecated and will be removed in a future version. 
" - "Use pd.to_timedelta instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - else: - unit = None - name = maybe_extract_name(name, data, cls) if is_scalar(data): cls._raise_scalar_data_error(data) - disallow_ambiguous_unit(unit) if dtype is not None: dtype = pandas_dtype(dtype) @@ -211,7 +178,7 @@ def __new__( # - Cases checked above all return/raise before reaching here - # tdarr = TimedeltaArray._from_sequence_not_strict( - data, freq=freq, unit=unit, dtype=dtype, copy=copy + data, freq=freq, unit=None, dtype=dtype, copy=copy ) refs = None if not copy and isinstance(data, (ABCSeries, Index)): diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index 48bbfc1a9f646..4be45e834ce31 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -35,18 +35,6 @@ class TestDatetimeIndex: - def test_closed_deprecated(self): - # GH#52628 - msg = "The 'closed' keyword" - with tm.assert_produces_warning(FutureWarning, match=msg): - DatetimeIndex([], closed=True) - - def test_normalize_deprecated(self): - # GH#52628 - msg = "The 'normalize' keyword" - with tm.assert_produces_warning(FutureWarning, match=msg): - DatetimeIndex([], normalize=True) - def test_from_dt64_unsupported_unit(self): # GH#49292 val = np.datetime64(1, "D") diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py index 0510700bb64d7..2f97ab6be8965 100644 --- a/pandas/tests/indexes/timedeltas/test_constructors.py +++ b/pandas/tests/indexes/timedeltas/test_constructors.py @@ -15,12 +15,6 @@ class TestTimedeltaIndex: - def test_closed_deprecated(self): - # GH#52628 - msg = "The 'closed' keyword" - with tm.assert_produces_warning(FutureWarning, match=msg): - TimedeltaIndex([], closed=True) - def test_array_of_dt64_nat_raises(self): # GH#39462 nat = np.datetime64("NaT", "ns") @@ -36,14 +30,6 @@ def 
test_array_of_dt64_nat_raises(self): with pytest.raises(TypeError, match=msg): to_timedelta(arr) - @pytest.mark.parametrize("unit", ["Y", "y", "M"]) - def test_unit_m_y_raises(self, unit): - msg = "Units 'M', 'Y', and 'y' are no longer supported" - depr_msg = "The 'unit' keyword in TimedeltaIndex construction is deprecated" - with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=depr_msg): - TimedeltaIndex([1, 3, 7], unit) - def test_int64_nocopy(self): # GH#23539 check that a copy isn't made when we pass int64 data # and copy=False @@ -138,9 +124,6 @@ def test_construction_base_constructor(self): tm.assert_index_equal(pd.Index(arr), TimedeltaIndex(arr)) tm.assert_index_equal(pd.Index(np.array(arr)), TimedeltaIndex(np.array(arr))) - @pytest.mark.filterwarnings( - "ignore:The 'unit' keyword in TimedeltaIndex construction:FutureWarning" - ) def test_constructor(self): expected = TimedeltaIndex( [ @@ -162,22 +145,6 @@ def test_constructor(self): ) tm.assert_index_equal(result, expected) - expected = TimedeltaIndex( - ["0 days 00:00:00", "0 days 00:00:01", "0 days 00:00:02"] - ) - result = TimedeltaIndex(range(3), unit="s") - tm.assert_index_equal(result, expected) - expected = TimedeltaIndex( - ["0 days 00:00:00", "0 days 00:00:05", "0 days 00:00:09"] - ) - result = TimedeltaIndex([0, 5, 9], unit="s") - tm.assert_index_equal(result, expected) - expected = TimedeltaIndex( - ["0 days 00:00:00.400", "0 days 00:00:00.450", "0 days 00:00:01.200"] - ) - result = TimedeltaIndex([400, 450, 1200], unit="ms") - tm.assert_index_equal(result, expected) - def test_constructor_iso(self): # GH #21877 expected = timedelta_range("1s", periods=9, freq="s") diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py index c69f572c92bf2..5509216f4daf4 100644 --- a/pandas/tests/scalar/timedelta/test_constructors.py +++ b/pandas/tests/scalar/timedelta/test_constructors.py @@ -126,30 +126,26 
@@ def test_unit_parser(self, unit, np_unit, wrapper): ) # TODO(2.0): the desired output dtype may have non-nano resolution - msg = "The 'unit' keyword in TimedeltaIndex construction is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = to_timedelta(wrapper(range(5)), unit=unit) - tm.assert_index_equal(result, expected) - result = TimedeltaIndex(wrapper(range(5)), unit=unit) - tm.assert_index_equal(result, expected) - - str_repr = [f"{x}{unit}" for x in np.arange(5)] - result = to_timedelta(wrapper(str_repr)) - tm.assert_index_equal(result, expected) - result = to_timedelta(wrapper(str_repr)) - tm.assert_index_equal(result, expected) - - # scalar - expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]")) - result = to_timedelta(2, unit=unit) - assert result == expected - result = Timedelta(2, unit=unit) - assert result == expected - - result = to_timedelta(f"2{unit}") - assert result == expected - result = Timedelta(f"2{unit}") - assert result == expected + result = to_timedelta(wrapper(range(5)), unit=unit) + tm.assert_index_equal(result, expected) + + str_repr = [f"{x}{unit}" for x in np.arange(5)] + result = to_timedelta(wrapper(str_repr)) + tm.assert_index_equal(result, expected) + result = to_timedelta(wrapper(str_repr)) + tm.assert_index_equal(result, expected) + + # scalar + expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]")) + result = to_timedelta(2, unit=unit) + assert result == expected + result = Timedelta(2, unit=unit) + assert result == expected + + result = to_timedelta(f"2{unit}") + assert result == expected + result = Timedelta(f"2{unit}") + assert result == expected @pytest.mark.parametrize("unit", ["T", "t", "L", "l", "U", "u", "N", "n"]) def test_unit_T_L_N_U_raises(self, unit):
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58003
2024-03-25T20:46:33Z
2024-03-26T17:07:02Z
2024-03-26T17:07:02Z
2024-03-26T17:11:35Z
DEPR: Enforce deprecation of parsing to tzlocal
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 4d2381ae1e5e4..bb856936cd96d 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -202,6 +202,7 @@ Removal of prior version deprecations/changes - Enforced deprecation of :meth:`.DataFrameGroupBy.get_group` and :meth:`.SeriesGroupBy.get_group` allowing the ``name`` argument to be a non-tuple when grouping by a list of length 1 (:issue:`54155`) - Enforced deprecation of :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` for object-dtype (:issue:`57820`) - Enforced deprecation of ``axis=None`` acting the same as ``axis=0`` in the DataFrame reductions ``sum``, ``prod``, ``std``, ``var``, and ``sem``, passing ``axis=None`` will now reduce over both axes; this is particularly the case when doing e.g. ``numpy.sum(df)`` (:issue:`21597`) +- Enforced deprecation of parsing system timezone strings to ``tzlocal``, which depended on system timezone, pass the 'tz' keyword instead (:issue:`50791`) - Enforced deprecation of passing a dictionary to :meth:`SeriesGroupBy.agg` (:issue:`52268`) - Enforced deprecation of string ``AS`` denoting frequency in :class:`YearBegin` and strings ``AS-DEC``, ``AS-JAN``, etc. denoting annual frequencies with various fiscal year starts (:issue:`57793`) - Enforced deprecation of string ``A`` denoting frequency in :class:`YearEnd` and strings ``A-DEC``, ``A-JAN``, etc. 
denoting annual frequencies with various fiscal year ends (:issue:`57699`) diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 94c549cbd3db0..384df1cac95eb 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -45,7 +45,6 @@ from decimal import InvalidOperation from dateutil.parser import DEFAULTPARSER from dateutil.tz import ( - tzlocal as _dateutil_tzlocal, tzoffset, tzutc as _dateutil_tzutc, ) @@ -703,17 +702,12 @@ cdef datetime dateutil_parse( if res.tzname and res.tzname in time.tzname: # GH#50791 if res.tzname != "UTC": - # If the system is localized in UTC (as many CI runs are) - # we get tzlocal, once the deprecation is enforced will get - # timezone.utc, not raise. - warnings.warn( + raise ValueError( f"Parsing '{res.tzname}' as tzlocal (dependent on system timezone) " - "is deprecated and will raise in a future version. Pass the 'tz' " + "is no longer supported. Pass the 'tz' " "keyword or call tz_localize after construction instead", - FutureWarning, - stacklevel=find_stack_level() ) - ret = ret.replace(tzinfo=_dateutil_tzlocal()) + ret = ret.replace(tzinfo=timezone.utc) elif res.tzoffset == 0: ret = ret.replace(tzinfo=_dateutil_tzutc()) elif res.tzoffset: diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py index d1b0595dd50e6..52af5adb686a7 100644 --- a/pandas/tests/tslibs/test_parsing.py +++ b/pandas/tests/tslibs/test_parsing.py @@ -6,7 +6,6 @@ import re from dateutil.parser import parse as du_parse -from dateutil.tz import tzlocal from hypothesis import given import numpy as np import pytest @@ -22,6 +21,10 @@ ) import pandas.util._test_decorators as td +# Usually we wouldn't want this import in this test file (which is targeted at +# tslibs.parsing), but it is convenient to test the Timestamp constructor at +# the same time as the other parsing functions. 
+from pandas import Timestamp import pandas._testing as tm from pandas._testing._hypothesis import DATETIME_NO_TZ @@ -33,20 +36,21 @@ def test_parsing_tzlocal_deprecated(): # GH#50791 msg = ( - "Parsing 'EST' as tzlocal.*" + r"Parsing 'EST' as tzlocal \(dependent on system timezone\) " + r"is no longer supported\. " "Pass the 'tz' keyword or call tz_localize after construction instead" ) dtstr = "Jan 15 2004 03:00 EST" with tm.set_timezone("US/Eastern"): - with tm.assert_produces_warning(FutureWarning, match=msg): - res, _ = parse_datetime_string_with_reso(dtstr) + with pytest.raises(ValueError, match=msg): + parse_datetime_string_with_reso(dtstr) - assert isinstance(res.tzinfo, tzlocal) + with pytest.raises(ValueError, match=msg): + parsing.py_parse_datetime_string(dtstr) - with tm.assert_produces_warning(FutureWarning, match=msg): - res = parsing.py_parse_datetime_string(dtstr) - assert isinstance(res.tzinfo, tzlocal) + with pytest.raises(ValueError, match=msg): + Timestamp(dtstr) def test_parse_datetime_string_with_reso():
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58002
2024-03-25T20:45:34Z
2024-03-25T22:33:52Z
2024-03-25T22:33:52Z
2024-03-26T01:28:50Z
DEPR: remove Categorical.to_list
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 4d2381ae1e5e4..f3729fb697bea 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -211,6 +211,7 @@ Removal of prior version deprecations/changes - Enforced deprecation of strings ``T``, ``L``, ``U``, and ``N`` denoting units in :class:`Timedelta` (:issue:`57627`) - Enforced deprecation of the behavior of :func:`concat` when ``len(keys) != len(objs)`` would truncate to the shorter of the two. Now this raises a ``ValueError`` (:issue:`43485`) - Enforced deprecation of values "pad", "ffill", "bfill", and "backfill" for :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` (:issue:`57869`) +- Enforced deprecation removing :meth:`Categorical.to_list`, use ``obj.tolist()`` instead (:issue:`51254`) - Enforced silent-downcasting deprecation for :ref:`all relevant methods <whatsnew_220.silent_downcasting>` (:issue:`54710`) - In :meth:`DataFrame.stack`, the default value of ``future_stack`` is now ``True``; specifying ``False`` will raise a ``FutureWarning`` (:issue:`55448`) - Iterating over a :class:`.DataFrameGroupBy` or :class:`.SeriesGroupBy` will return tuples of length 1 for the groups when grouping by ``level`` a list of length 1 (:issue:`50064`) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 429dc9236cf45..416331a260e9f 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -626,19 +626,6 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: return result - def to_list(self) -> list: - """ - Alias for tolist. - """ - # GH#51254 - warnings.warn( - "Categorical.to_list is deprecated and will be removed in a future " - "version. 
Use obj.tolist() instead", - FutureWarning, - stacklevel=find_stack_level(), - ) - return self.tolist() - @classmethod def _from_inferred_categories( cls, inferred_categories, inferred_codes, dtype, true_values=None diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py index cff8afaa17516..2791fd55f54d7 100644 --- a/pandas/tests/arrays/categorical/test_api.py +++ b/pandas/tests/arrays/categorical/test_api.py @@ -18,13 +18,6 @@ class TestCategoricalAPI: - def test_to_list_deprecated(self): - # GH#51254 - cat1 = Categorical(list("acb"), ordered=False) - msg = "Categorical.to_list is deprecated and will be removed" - with tm.assert_produces_warning(FutureWarning, match=msg): - cat1.to_list() - def test_ordered_api(self): # GH 9347 cat1 = Categorical(list("acb"), ordered=False)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/58000
2024-03-25T20:37:56Z
2024-03-25T22:17:41Z
2024-03-25T22:17:40Z
2024-03-26T01:29:02Z
DEPR: Enforce datetimelike deprecations
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 69697906e493e..7d5b250c7b157 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -24,7 +24,7 @@ def setup(self): self.codes = np.tile(range(len(self.categories)), N) self.datetimes = pd.Series( - pd.date_range("1995-01-01 00:00:00", periods=N / 10, freq="s") + pd.date_range("1995-01-01 00:00:00", periods=N // 10, freq="s") ) self.datetimes_with_nat = self.datetimes.copy() self.datetimes_with_nat.iloc[-1] = pd.NaT diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index 06f488f7baaaf..8deec502898d9 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -29,7 +29,7 @@ def setup(self, index_type): "dst": date_range( start="10/29/2000 1:00:00", end="10/29/2000 1:59:59", freq="s" ), - "repeated": date_range(start="2000", periods=N / 10, freq="s").repeat(10), + "repeated": date_range(start="2000", periods=N // 10, freq="s").repeat(10), "tz_aware": date_range(start="2000", periods=N, freq="s", tz="US/Eastern"), "tz_local": date_range( start="2000", periods=N, freq="s", tz=dateutil.tz.tzlocal() diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index d2d5707f32bf3..003f3ea513c8d 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -221,7 +221,8 @@ Removal of prior version deprecations/changes - All arguments in :meth:`Series.to_dict` are now keyword only (:issue:`56493`) - Changed the default value of ``observed`` in :meth:`DataFrame.groupby` and :meth:`Series.groupby` to ``True`` (:issue:`51811`) - Enforce deprecation in :func:`testing.assert_series_equal` and :func:`testing.assert_frame_equal` with object dtype and mismatched null-like values, which are now considered not-equal (:issue:`18463`) -- Enforced deprecation ``all`` and ``any`` reductions with ``datetime64`` and :class:`DatetimeTZDtype` dtypes 
(:issue:`58029`) +- Enforced deprecation ``all`` and ``any`` reductions with ``datetime64``, :class:`DatetimeTZDtype`, and :class:`PeriodDtype` dtypes (:issue:`58029`) +- Enforced deprecation disallowing ``float`` "periods" in :func:`date_range`, :func:`period_range`, :func:`timedelta_range`, :func:`interval_range`, (:issue:`56036`) - Enforced deprecation disallowing parsing datetimes with mixed time zones unless user passes ``utc=True`` to :func:`to_datetime` (:issue:`57275`) - Enforced deprecation in :meth:`Series.value_counts` and :meth:`Index.value_counts` with object dtype performing dtype inference on the ``.index`` of the result (:issue:`56161`) - Enforced deprecation of :meth:`.DataFrameGroupBy.get_group` and :meth:`.SeriesGroupBy.get_group` allowing the ``name`` argument to be a non-tuple when grouping by a list of length 1 (:issue:`54155`) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index d46810e6ebbdd..c9aeaa1ce21c3 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1661,8 +1661,14 @@ def _groupby_op( dtype = self.dtype if dtype.kind == "M": # Adding/multiplying datetimes is not valid - if how in ["any", "all", "sum", "prod", "cumsum", "cumprod", "var", "skew"]: + if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]: raise TypeError(f"datetime64 type does not support operation '{how}'") + if how in ["any", "all"]: + # GH#34479 + raise TypeError( + f"'{how}' with datetime64 dtypes is no longer supported. " + f"Use (obj != pd.Timestamp(0)).{how}() instead." + ) elif isinstance(dtype, PeriodDtype): # Adding/multiplying Periods is not valid @@ -1670,11 +1676,9 @@ def _groupby_op( raise TypeError(f"Period type does not support {how} operations") if how in ["any", "all"]: # GH#34479 - warnings.warn( - f"'{how}' with PeriodDtype is deprecated and will raise in a " - f"future version. 
Use (obj != pd.Period(0, freq)).{how}() instead.", - FutureWarning, - stacklevel=find_stack_level(), + raise TypeError( + f"'{how}' with PeriodDtype is no longer supported. " + f"Use (obj != pd.Period(0, freq)).{how}() instead." ) else: # timedeltas we can add but not multiply @@ -2424,17 +2428,17 @@ def validate_periods(periods: None) -> None: ... @overload -def validate_periods(periods: int | float) -> int: ... +def validate_periods(periods: int) -> int: ... -def validate_periods(periods: int | float | None) -> int | None: +def validate_periods(periods: int | None) -> int | None: """ If a `periods` argument is passed to the Datetime/Timedelta Array/Index constructor, cast it to an integer. Parameters ---------- - periods : None, float, int + periods : None, int Returns ------- @@ -2443,22 +2447,13 @@ def validate_periods(periods: int | float | None) -> int | None: Raises ------ TypeError - if periods is None, float, or int + if periods is not None or int """ - if periods is not None: - if lib.is_float(periods): - warnings.warn( - # GH#56036 - "Non-integer 'periods' in pd.date_range, pd.timedelta_range, " - "pd.period_range, and pd.interval_range are deprecated and " - "will raise in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) - periods = int(periods) - elif not lib.is_integer(periods): - raise TypeError(f"periods must be a number, got {periods}") - return periods + if periods is not None and not lib.is_integer(periods): + raise TypeError(f"periods must be an integer, got {periods}") + # error: Incompatible return value type (got "int | integer[Any] | None", + # expected "int | None") + return periods # type: ignore[return-value] def _validate_inferred_freq( diff --git a/pandas/tests/groupby/test_raises.py b/pandas/tests/groupby/test_raises.py index 70be98af1289f..9301f8d56d9d2 100644 --- a/pandas/tests/groupby/test_raises.py +++ b/pandas/tests/groupby/test_raises.py @@ -241,8 +241,8 @@ def test_groupby_raises_datetime( return klass, 
msg = { - "all": (TypeError, "datetime64 type does not support operation 'all'"), - "any": (TypeError, "datetime64 type does not support operation 'any'"), + "all": (TypeError, "'all' with datetime64 dtypes is no longer supported"), + "any": (TypeError, "'any' with datetime64 dtypes is no longer supported"), "bfill": (None, ""), "corrwith": (TypeError, "cannot perform __mul__ with this index type"), "count": (None, ""), diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 43fcfd1e59670..99d05dd0f26e4 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -135,16 +135,14 @@ def test_date_range_name(self): assert idx.name == "TEST" def test_date_range_invalid_periods(self): - msg = "periods must be a number, got foo" + msg = "periods must be an integer, got foo" with pytest.raises(TypeError, match=msg): date_range(start="1/1/2000", periods="foo", freq="D") def test_date_range_fractional_period(self): - msg = "Non-integer 'periods' in pd.date_range, pd.timedelta_range" - with tm.assert_produces_warning(FutureWarning, match=msg): - rng = date_range("1/1/2000", periods=10.5) - exp = date_range("1/1/2000", periods=10) - tm.assert_index_equal(rng, exp) + msg = "periods must be an integer" + with pytest.raises(TypeError, match=msg): + date_range("1/1/2000", periods=10.5) @pytest.mark.parametrize( "freq,freq_depr", @@ -1042,7 +1040,7 @@ def test_constructor(self): bdate_range(START, periods=20, freq=BDay()) bdate_range(end=START, periods=20, freq=BDay()) - msg = "periods must be a number, got B" + msg = "periods must be an integer, got B" with pytest.raises(TypeError, match=msg): date_range("2011-1-1", "2012-1-1", "B") @@ -1120,7 +1118,7 @@ def test_constructor(self): bdate_range(START, periods=20, freq=CDay()) bdate_range(end=START, periods=20, freq=CDay()) - msg = "periods must be a number, got C" + msg = "periods must be an integer, got 
C" with pytest.raises(TypeError, match=msg): date_range("2011-1-1", "2012-1-1", "C") diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py index 7aea481b49221..5252b85ad8d0e 100644 --- a/pandas/tests/indexes/interval/test_interval_range.py +++ b/pandas/tests/indexes/interval/test_interval_range.py @@ -236,11 +236,10 @@ def test_interval_dtype(self, start, end, expected): def test_interval_range_fractional_period(self): # float value for periods - expected = interval_range(start=0, periods=10) - msg = "Non-integer 'periods' in pd.date_range, .* pd.interval_range" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = interval_range(start=0, periods=10.5) - tm.assert_index_equal(result, expected) + msg = "periods must be an integer, got 10.5" + ts = Timestamp("2024-03-25") + with pytest.raises(TypeError, match=msg): + interval_range(ts, periods=10.5) def test_constructor_coverage(self): # equivalent timestamp-like start/end @@ -340,7 +339,7 @@ def test_errors(self): interval_range(start=Timedelta("1 day"), end=Timedelta("10 days"), freq=2) # invalid periods - msg = "periods must be a number, got foo" + msg = "periods must be an integer, got foo" with pytest.raises(TypeError, match=msg): interval_range(start=0, periods="foo") diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py index ec2216c102c3f..6aba9f17326ba 100644 --- a/pandas/tests/indexes/period/test_constructors.py +++ b/pandas/tests/indexes/period/test_constructors.py @@ -196,11 +196,9 @@ def test_constructor_invalid_quarters(self): ) def test_period_range_fractional_period(self): - msg = "Non-integer 'periods' in pd.date_range, pd.timedelta_range" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = period_range("2007-01", periods=10.5, freq="M") - exp = period_range("2007-01", periods=10, freq="M") - tm.assert_index_equal(result, exp) + msg = 
"periods must be an integer, got 10.5" + with pytest.raises(TypeError, match=msg): + period_range("2007-01", periods=10.5, freq="M") def test_constructor_with_without_freq(self): # GH53687 diff --git a/pandas/tests/indexes/period/test_period_range.py b/pandas/tests/indexes/period/test_period_range.py index fb200d071951e..67f4d7421df23 100644 --- a/pandas/tests/indexes/period/test_period_range.py +++ b/pandas/tests/indexes/period/test_period_range.py @@ -70,7 +70,7 @@ def test_start_end_non_nat(self): def test_periods_requires_integer(self): # invalid periods param - msg = "periods must be a number, got foo" + msg = "periods must be an integer, got foo" with pytest.raises(TypeError, match=msg): period_range(start="2017Q1", periods="foo") diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py index 895ea110c8ad5..12ac5dd63bd8c 100644 --- a/pandas/tests/indexes/timedeltas/test_constructors.py +++ b/pandas/tests/indexes/timedeltas/test_constructors.py @@ -143,14 +143,12 @@ def test_constructor_iso(self): tm.assert_index_equal(result, expected) def test_timedelta_range_fractional_period(self): - msg = "Non-integer 'periods' in pd.date_range, pd.timedelta_range" - with tm.assert_produces_warning(FutureWarning, match=msg): - rng = timedelta_range("1 days", periods=10.5) - exp = timedelta_range("1 days", periods=10) - tm.assert_index_equal(rng, exp) + msg = "periods must be an integer" + with pytest.raises(TypeError, match=msg): + timedelta_range("1 days", periods=10.5) def test_constructor_coverage(self): - msg = "periods must be a number, got foo" + msg = "periods must be an integer, got foo" with pytest.raises(TypeError, match=msg): timedelta_range(start="1 days", periods="foo", freq="D")
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57999
2024-03-25T20:30:17Z
2024-04-05T17:06:14Z
2024-04-05T17:06:14Z
2024-04-05T17:06:22Z
CI: Enable pytables and numba in 312 build
diff --git a/ci/deps/actions-312.yaml b/ci/deps/actions-312.yaml index eb70816c241bb..cbff1875783d4 100644 --- a/ci/deps/actions-312.yaml +++ b/ci/deps/actions-312.yaml @@ -34,7 +34,7 @@ dependencies: - jinja2>=3.1.2 - lxml>=4.9.2 - matplotlib>=3.6.3 - # - numba>=0.56.4 + - numba>=0.56.4 - numexpr>=2.8.4 - odfpy>=1.4.1 - qtpy>=2.3.0 @@ -44,7 +44,7 @@ dependencies: - pyarrow>=10.0.1 - pymysql>=1.0.2 - pyreadstat>=1.2.0 - # - pytables>=3.8.0 + - pytables>=3.8.0 - python-calamine>=0.1.7 - pyxlsb>=1.0.10 - s3fs>=2022.11.0 diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py index b722a7f179479..7f7f7eccb2382 100644 --- a/pandas/tests/io/pytables/test_append.py +++ b/pandas/tests/io/pytables/test_append.py @@ -6,6 +6,7 @@ import pytest from pandas._libs.tslibs import Timestamp +from pandas.compat import PY312 import pandas as pd from pandas import ( @@ -283,7 +284,7 @@ def test_append_all_nans(setup_path): tm.assert_frame_equal(store["df2"], df, check_index_type=True) -def test_append_frame_column_oriented(setup_path): +def test_append_frame_column_oriented(setup_path, request): with ensure_clean_store(setup_path) as store: # column oriented df = DataFrame( @@ -303,6 +304,13 @@ def test_append_frame_column_oriented(setup_path): tm.assert_frame_equal(expected, result) # selection on the non-indexable + request.applymarker( + pytest.mark.xfail( + PY312, + reason="AST change in PY312", + raises=ValueError, + ) + ) result = store.select("df1", ("columns=A", "index=df.index[0:4]")) expected = df.reindex(columns=["A"], index=df.index[0:4]) tm.assert_frame_equal(expected, result) diff --git a/pandas/tests/io/pytables/test_select.py b/pandas/tests/io/pytables/test_select.py index 0e303d1c890c5..752e2fc570023 100644 --- a/pandas/tests/io/pytables/test_select.py +++ b/pandas/tests/io/pytables/test_select.py @@ -2,6 +2,7 @@ import pytest from pandas._libs.tslibs import Timestamp +from pandas.compat import PY312 import pandas as pd from 
pandas import ( @@ -168,7 +169,7 @@ def test_select(setup_path): tm.assert_frame_equal(expected, result) -def test_select_dtypes(setup_path): +def test_select_dtypes(setup_path, request): with ensure_clean_store(setup_path) as store: # with a Timestamp data column (GH #2637) df = DataFrame( @@ -279,6 +280,13 @@ def test_select_dtypes(setup_path): expected = df[df["A"] > 0] store.append("df", df, data_columns=True) + request.applymarker( + pytest.mark.xfail( + PY312, + reason="AST change in PY312", + raises=ValueError, + ) + ) np_zero = np.float64(0) # noqa: F841 result = store.select("df", where=["A>np_zero"]) tm.assert_frame_equal(expected, result) @@ -607,7 +615,7 @@ def test_select_iterator_many_empty_frames(setup_path): assert len(results) == 0 -def test_frame_select(setup_path): +def test_frame_select(setup_path, request): df = DataFrame( np.random.default_rng(2).standard_normal((10, 4)), columns=Index(list("ABCD"), dtype=object), @@ -624,6 +632,13 @@ def test_frame_select(setup_path): crit2 = "columns=['A', 'D']" crit3 = "columns=A" + request.applymarker( + pytest.mark.xfail( + PY312, + reason="AST change in PY312", + raises=TypeError, + ) + ) result = store.select("frame", [crit1, crit2]) expected = df.loc[date:, ["A", "D"]] tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index fda385685da19..e62df0bc1c977 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -7,6 +7,8 @@ import numpy as np import pytest +from pandas.compat import PY312 + import pandas as pd from pandas import ( DataFrame, @@ -866,7 +868,7 @@ def test_start_stop_fixed(setup_path): df.iloc[8:10, -2] = np.nan -def test_select_filter_corner(setup_path): +def test_select_filter_corner(setup_path, request): df = DataFrame(np.random.default_rng(2).standard_normal((50, 100))) df.index = [f"{c:3d}" for c in df.index] df.columns = [f"{c:3d}" for c in df.columns] @@ -874,6 
+876,13 @@ def test_select_filter_corner(setup_path): with ensure_clean_store(setup_path) as store: store.put("frame", df, format="table") + request.applymarker( + pytest.mark.xfail( + PY312, + reason="AST change in PY312", + raises=ValueError, + ) + ) crit = "columns=df.columns[:75]" result = store.select("frame", [crit]) tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
Looks like there are conda packages for this Python version now
https://api.github.com/repos/pandas-dev/pandas/pulls/57998
2024-03-25T18:50:39Z
2024-03-26T21:34:28Z
2024-03-26T21:34:28Z
2024-03-26T21:34:31Z
REF: Use numpy set methods in interpolate
diff --git a/pandas/core/missing.py b/pandas/core/missing.py index b3e152e36a304..9fef78d9f8c3d 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -471,20 +471,20 @@ def _interpolate_1d( if valid.all(): return - # These are sets of index pointers to invalid values... i.e. {0, 1, etc... - all_nans = set(np.flatnonzero(invalid)) + # These index pointers to invalid values... i.e. {0, 1, etc... + all_nans = np.flatnonzero(invalid) first_valid_index = find_valid_index(how="first", is_valid=valid) if first_valid_index is None: # no nan found in start first_valid_index = 0 - start_nans = set(range(first_valid_index)) + start_nans = np.arange(first_valid_index) last_valid_index = find_valid_index(how="last", is_valid=valid) if last_valid_index is None: # no nan found in end last_valid_index = len(yvalues) - end_nans = set(range(1 + last_valid_index, len(valid))) + end_nans = np.arange(1 + last_valid_index, len(valid)) - # Like the sets above, preserve_nans contains indices of invalid values, + # preserve_nans contains indices of invalid values, # but in this case, it is the final set of indices that need to be # preserved as NaN after the interpolation. @@ -493,27 +493,25 @@ def _interpolate_1d( # are more than 'limit' away from the prior non-NaN. # set preserve_nans based on direction using _interp_limit - preserve_nans: list | set if limit_direction == "forward": - preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0)) + preserve_nans = np.union1d(start_nans, _interp_limit(invalid, limit, 0)) elif limit_direction == "backward": - preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit)) + preserve_nans = np.union1d(end_nans, _interp_limit(invalid, 0, limit)) else: # both directions... 
just use _interp_limit - preserve_nans = set(_interp_limit(invalid, limit, limit)) + preserve_nans = np.unique(_interp_limit(invalid, limit, limit)) # if limit_area is set, add either mid or outside indices # to preserve_nans GH #16284 if limit_area == "inside": # preserve NaNs on the outside - preserve_nans |= start_nans | end_nans + preserve_nans = np.union1d(preserve_nans, start_nans) + preserve_nans = np.union1d(preserve_nans, end_nans) elif limit_area == "outside": # preserve NaNs on the inside - mid_nans = all_nans - start_nans - end_nans - preserve_nans |= mid_nans - - # sort preserve_nans and convert to list - preserve_nans = sorted(preserve_nans) + mid_nans = np.setdiff1d(all_nans, start_nans, assume_unique=True) + mid_nans = np.setdiff1d(mid_nans, end_nans, assume_unique=True) + preserve_nans = np.union1d(preserve_nans, mid_nans) is_datetimelike = yvalues.dtype.kind in "mM" @@ -1027,7 +1025,7 @@ def clean_reindex_fill_method(method) -> ReindexMethod | None: def _interp_limit( invalid: npt.NDArray[np.bool_], fw_limit: int | None, bw_limit: int | None -): +) -> np.ndarray: """ Get indexers of values that won't be filled because they exceed the limits. @@ -1059,20 +1057,23 @@ def _interp_limit(invalid, fw_limit, bw_limit): # 1. operate on the reversed array # 2. 
subtract the returned indices from N - 1 N = len(invalid) - f_idx = set() - b_idx = set() + f_idx = np.array([], dtype=np.int64) + b_idx = np.array([], dtype=np.int64) + assume_unique = True def inner(invalid, limit: int): limit = min(limit, N) - windowed = _rolling_window(invalid, limit + 1).all(1) - idx = set(np.where(windowed)[0] + limit) | set( - np.where((~invalid[: limit + 1]).cumsum() == 0)[0] + windowed = np.lib.stride_tricks.sliding_window_view(invalid, limit + 1).all(1) + idx = np.union1d( + np.where(windowed)[0] + limit, + np.where((~invalid[: limit + 1]).cumsum() == 0)[0], ) return idx if fw_limit is not None: if fw_limit == 0: - f_idx = set(np.where(invalid)[0]) + f_idx = np.where(invalid)[0] + assume_unique = False else: f_idx = inner(invalid, fw_limit) @@ -1082,26 +1083,8 @@ def inner(invalid, limit: int): # just use forwards return f_idx else: - b_idx_inv = list(inner(invalid[::-1], bw_limit)) - b_idx = set(N - 1 - np.asarray(b_idx_inv)) + b_idx = N - 1 - inner(invalid[::-1], bw_limit) if fw_limit == 0: return b_idx - return f_idx & b_idx - - -def _rolling_window(a: npt.NDArray[np.bool_], window: int) -> npt.NDArray[np.bool_]: - """ - [True, True, False, True, False], 2 -> - - [ - [True, True], - [True, False], - [False, True], - [True, False], - ] - """ - # https://stackoverflow.com/a/6811241 - shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) - strides = a.strides + (a.strides[-1],) - return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) + return np.intersect1d(f_idx, b_idx, assume_unique=assume_unique)
`_interpolate_1d` goes back and forth between Python objects and Numpy objects in order to use Python `set` methods. Refactoring to just use Numpy set routines instead
https://api.github.com/repos/pandas-dev/pandas/pulls/57997
2024-03-25T18:34:39Z
2024-03-26T17:11:04Z
2024-03-26T17:11:04Z
2024-03-26T17:11:08Z
CLN: `pandas.concat` internal checks
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 35a08e0167924..b1f662b6f231f 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -635,16 +635,13 @@ def _get_concat_axis(self) -> Index: indexes, self.keys, self.levels, self.names ) - self._maybe_check_integrity(concat_axis) - - return concat_axis - - def _maybe_check_integrity(self, concat_index: Index) -> None: if self.verify_integrity: - if not concat_index.is_unique: - overlap = concat_index[concat_index.duplicated()].unique() + if not concat_axis.is_unique: + overlap = concat_axis[concat_axis.duplicated()].unique() raise ValueError(f"Indexes have overlapping values: {overlap}") + return concat_axis + def _clean_keys_and_objs( objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], @@ -742,6 +739,12 @@ def _concat_indexes(indexes) -> Index: return indexes[0].append(indexes[1:]) +def validate_unique_levels(levels: list[Index]) -> None: + for level in levels: + if not level.is_unique: + raise ValueError(f"Level values not unique: {level.tolist()}") + + def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiIndex: if (levels is None and isinstance(keys[0], tuple)) or ( levels is not None and len(levels) > 1 @@ -754,6 +757,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde _, levels = factorize_from_iterables(zipped) else: levels = [ensure_index(x) for x in levels] + validate_unique_levels(levels) else: zipped = [keys] if names is None: @@ -763,12 +767,9 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde levels = [ensure_index(keys).unique()] else: levels = [ensure_index(x) for x in levels] + validate_unique_levels(levels) - for level in levels: - if not level.is_unique: - raise ValueError(f"Level values not unique: {level.tolist()}") - - if not all_indexes_same(indexes) or not all(level.is_unique for level in levels): + if not 
all_indexes_same(indexes): codes_list = [] # things are potentially different sizes, so compute the exact codes
* Only validate unique `levels` when needed * Inline the single-use `_maybe_check_integrity` check
https://api.github.com/repos/pandas-dev/pandas/pulls/57996
2024-03-25T18:27:31Z
2024-03-26T21:34:19Z
2024-03-26T21:34:19Z
2024-03-26T21:34:37Z
BUG: encoding is ignored for read_csv on FileLike objects - FIX - GH#57954
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index b234a6b78e051..3bdfab8d2e669 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -11,6 +11,7 @@ defaultdict, ) import csv +from io import TextIOBase import sys from textwrap import fill from typing import ( @@ -655,6 +656,14 @@ def _read( else: chunksize = validate_integer("chunksize", chunksize, 1) + encoding = kwds.get("encoding") + if ( + encoding + and isinstance(filepath_or_buffer, TextIOBase) + and filepath_or_buffer.encoding != encoding + ): + raise ValueError("File's encoding does not match with given encoding") + nrows = kwds.get("nrows", None) # Check for duplicates in names. diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py index 0827f64dccf46..39ed55930138f 100644 --- a/pandas/tests/io/parser/common/test_read_errors.py +++ b/pandas/tests/io/parser/common/test_read_errors.py @@ -8,6 +8,7 @@ from io import StringIO import os from pathlib import Path +import uuid import numpy as np import pytest @@ -322,3 +323,25 @@ def test_on_bad_lines_warn_correct_formatting(all_parsers): ): result = parser.read_csv(StringIO(data), on_bad_lines="warn") tm.assert_frame_equal(result, expected) + + +def test_filetype_encoding_miss_match_with_given_encoding(all_parsers): + # GH#57954 + + data = """ +A,B +Ü,Ä +""" + parser = all_parsers + path = f"__{uuid.uuid4()}__.csv" + + with tm.ensure_clean(path) as path: + bytes_data = data.encode("latin1") + + with open(path, "wb") as f: + f.write(bytes_data) + msg = "File's encoding does not match with given encoding" + err = ValueError + with pytest.raises(err, match=msg): + with open(path) as f: + parser.read_csv(f, encoding="latin1", on_bad_lines="warn")
- [x] closes #57954 - [x] Tests added and passed - [x] All code checks passed - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57992
2024-03-25T07:53:17Z
2024-03-25T11:58:23Z
null
2024-03-25T11:58:28Z
DOC: Fix reference to rows in `read_csv(index_col)` error message
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 3bbb7c83345e5..5a7d117b0543e 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -174,7 +174,7 @@ def __init__(self, kwds) -> None: and all(map(is_integer, self.index_col)) ): raise ValueError( - "index_col must only contain row numbers " + "index_col must only contain integers of column positions " "when specifying a multi-index header" ) else: diff --git a/pandas/tests/io/parser/test_header.py b/pandas/tests/io/parser/test_header.py index d185e83bfc027..85ce55b3bcf83 100644 --- a/pandas/tests/io/parser/test_header.py +++ b/pandas/tests/io/parser/test_header.py @@ -162,7 +162,7 @@ def test_header_multi_index(all_parsers): {"index_col": ["foo", "bar"]}, ( "index_col must only contain " - "row numbers when specifying " + "integers of column positions when specifying " "a multi-index header" ), ),
**Description** The user passes column numbers (not row numbers) as the `index_col` argument to specify an index when calling e.g. `read_csv()`. **Checklist** - [x] ~closes #xxxx (Replace xxxx with the GitHub issue number)~ (No Specific Issue) - [x] ~[Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature~ (No change to functionality) - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] ~Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.~ (No new args/methods/functions). - [x] ~Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.~ (No change to functionality)
https://api.github.com/repos/pandas-dev/pandas/pulls/57991
2024-03-25T04:18:00Z
2024-03-26T17:07:56Z
2024-03-26T17:07:56Z
2024-03-26T17:19:46Z
DOC: ecosystem.md: add pygwalker, add seaborn code example
diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md index 715a2fafbe87a..6cd67302b2a0e 100644 --- a/web/pandas/community/ecosystem.md +++ b/web/pandas/community/ecosystem.md @@ -82,6 +82,20 @@ pd.set_option("plotting.backend", "pandas_bokeh") It is very similar to the matplotlib plotting backend, but provides interactive web-based charts and maps. +### [pygwalker](https://github.com/Kanaries/pygwalker) + +PyGWalker is an interactive data visualization and +exploratory data analysis tool built upon Graphic Walker +with support for visualization, cleaning, and annotation workflows. + +pygwalker can save interactively created charts +to Graphic-Walker and Vega-Lite JSON. + +``` +import pygwalker as pyg +pyg.walk(df) +``` + ### [seaborn](https://seaborn.pydata.org) Seaborn is a Python visualization library based on @@ -94,6 +108,11 @@ pandas with the option to perform statistical estimation while plotting, aggregating across observations and visualizing the fit of statistical models to emphasize patterns in a dataset. +``` +import seaborn as sns +sns.set_theme() +``` + ### [plotnine](https://github.com/has2k1/plotnine/) Hadley Wickham's [ggplot2](https://ggplot2.tidyverse.org/) is a
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57990
2024-03-25T02:12:12Z
2024-03-27T19:00:51Z
2024-03-27T19:00:51Z
2024-03-27T21:13:59Z
BUG: Fix error for `boxplot` when using a pre-grouped `DataFrame` with more than one grouping
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 74a19472ec835..15e85d0f90c5e 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -409,7 +409,7 @@ Period Plotting ^^^^^^^^ -- +- Bug in :meth:`.DataFrameGroupBy.boxplot` failed when there were multiple groupings (:issue:`14701`) - Groupby/resample/rolling diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index b41e03d87b275..75b24cd42e062 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -533,14 +533,14 @@ def boxplot_frame_groupby( ) axes = flatten_axes(axes) - ret = pd.Series(dtype=object) - + data = {} for (key, group), ax in zip(grouped, axes): d = group.boxplot( ax=ax, column=column, fontsize=fontsize, rot=rot, grid=grid, **kwds ) ax.set_title(pprint_thing(key)) - ret.loc[key] = d + data[key] = d + ret = pd.Series(data) maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) else: keys, frames = zip(*grouped) diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index 2dd45a9abc7a5..f8029a1c1ee40 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -740,3 +740,17 @@ def test_boxplot_multiindex_column(self): expected_xticklabel = ["(bar, one)", "(bar, two)"] result_xticklabel = [x.get_text() for x in axes.get_xticklabels()] assert expected_xticklabel == result_xticklabel + + @pytest.mark.parametrize("group", ["X", ["X", "Y"]]) + def test_boxplot_multi_groupby_groups(self, group): + # GH 14701 + rows = 20 + df = DataFrame( + np.random.default_rng(12).normal(size=(rows, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(np.repeat(["A", "B"], int(rows / 2))) + df["Y"] = Series(np.tile(["C", "D"], int(rows / 2))) + grouped = df.groupby(group) + _check_plot_works(df.boxplot, by=group, default_axes=True) + 
_check_plot_works(df.plot.box, by=group, default_axes=True) + _check_plot_works(grouped.boxplot, default_axes=True)
- [x] closes #14701 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v3.0.0.rst` file if fixing a bug or adding a new feature. The original bug shows an indexing error when using `boxplot` on a pre-grouped `DataFrame`. For example: `df.groupby(['X', 'Y']).boxplot()` This error occurs as multiple groupings are passed as a tuple into the index of a `Series`, which is not allowed. The fix converts the `tuple` to a `string`, which is acceptable as an index to a `Series`. There are other comments on the bug thread that state "Groupby boxplot is not working even when a single 'by' is used", which is not the case. I have included in the tests both a single `by` and multiple `by` covering three different methods of plotting a `boxplot` from a `DataFrame`: 1) `df.boxplot(by='X')` 2) `df.boxplot(by=['X','Y'])` 3) `df.plot.box(by='X')` 4) `df.plot.box(by=['X','Y'])` 5) `df.groupby('X').boxplot()` 6) **`df.groupby(['X','Y']).boxplot()`** Of all of the above, only example 6 required the fix.
https://api.github.com/repos/pandas-dev/pandas/pulls/57985
2024-03-24T18:26:01Z
2024-03-31T15:00:23Z
2024-03-31T15:00:23Z
2024-03-31T16:08:06Z
CLN: Enforce deprecations for EA.fillna
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index f225d384888e3..76cb3cb1f2e81 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -34,7 +34,6 @@ Other enhancements - Allow dictionaries to be passed to :meth:`pandas.Series.str.replace` via ``pat`` parameter (:issue:`51748`) - Support passing a :class:`Series` input to :func:`json_normalize` that retains the :class:`Series` :class:`Index` (:issue:`51452`) - Users can globally disable any ``PerformanceWarning`` by setting the option ``mode.performance_warnings`` to ``False`` (:issue:`56920`) -- .. --------------------------------------------------------------------------- .. _whatsnew_300.notable_bug_fixes: @@ -258,6 +257,7 @@ Removal of prior version deprecations/changes - Unrecognized timezones when parsing strings to datetimes now raises a ``ValueError`` (:issue:`51477`) - Removed the :class:`Grouper` attributes ``ax``, ``groups``, ``indexer``, and ``obj`` (:issue:`51206`, :issue:`51182`) - Removed deprecated keyword ``verbose`` on :func:`read_csv` and :func:`read_table` (:issue:`56556`) +- Removed the ``method`` keyword in ``ExtensionArray.fillna``, implement ``ExtensionArray._pad_or_backfill`` instead (:issue:`53621`) - Removed the attribute ``dtypes`` from :class:`.DataFrameGroupBy` (:issue:`51997`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index c1d0ade572e8a..7f4e6f6666382 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -33,7 +33,6 @@ from pandas.util._decorators import doc from pandas.util._validators import ( validate_bool_kwarg, - validate_fillna_kwargs, validate_insert_loc, ) @@ -336,13 +335,7 @@ def _pad_or_backfill( return new_values @doc(ExtensionArray.fillna) - def fillna( - self, value=None, method=None, limit: int | None = None, copy: bool = True - ) -> Self: - value, method = validate_fillna_kwargs( - value, method, validate_scalar_dict_value=False - ) - + def fillna(self, value=None, limit: int | None = None, copy: bool = True) -> Self: mask = self.isna() # error: Argument 2 to "check_value_size" has incompatible type # "ExtensionArray"; expected "ndarray" @@ -353,25 +346,12 @@ def fillna( ) if mask.any(): - if method is not None: - # (for now) when self.ndim == 2, we assume axis=0 - func = missing.get_fill_func(method, ndim=self.ndim) - npvalues = self._ndarray.T - if copy: - npvalues = npvalues.copy() - func(npvalues, limit=limit, mask=mask.T) - npvalues = npvalues.T - - # TODO: NumpyExtensionArray didn't used to copy, need tests - # for this - new_values = self._from_backing_data(npvalues) + # fill with value + if copy: + new_values = self.copy() else: - # fill with value - if copy: - new_values = self.copy() - else: - new_values = self[:] - new_values[mask] = value + new_values = self[:] + new_values[mask] = value else: # We validate the fill_value even if there is nothing to fill if value is not None: diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index aaf43662ebde2..84b62563605ac 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -29,7 +29,6 @@ pa_version_under13p0, ) from pandas.util._decorators import doc -from 
pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.cast import ( can_hold_element, @@ -1068,6 +1067,7 @@ def _pad_or_backfill( # a kernel for duration types. pass + # TODO: Why do we no longer need the above cases? # TODO(3.0): after EA.fillna 'method' deprecation is enforced, we can remove # this method entirely. return super()._pad_or_backfill( @@ -1078,21 +1078,15 @@ def _pad_or_backfill( def fillna( self, value: object | ArrayLike | None = None, - method: FillnaOptions | None = None, limit: int | None = None, copy: bool = True, ) -> Self: - value, method = validate_fillna_kwargs(value, method) - if not self._hasna: # TODO(CoW): Not necessary anymore when CoW is the default return self.copy() if limit is not None: - return super().fillna(value=value, method=method, limit=limit, copy=copy) - - if method is not None: - return super().fillna(method=method, limit=limit, copy=copy) + return super().fillna(value=value, limit=limit, copy=copy) if isinstance(value, (np.ndarray, ExtensionArray)): # Similar to check_value_size, but we do not mask here since we may @@ -1118,7 +1112,7 @@ def fillna( # a kernel for duration types. pass - return super().fillna(value=value, method=method, limit=limit, copy=copy) + return super().fillna(value=value, limit=limit, copy=copy) def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: # short-circuit to return all False array. 
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 86831f072bb8f..76615704f2e33 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -38,7 +38,6 @@ from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_bool_kwarg, - validate_fillna_kwargs, validate_insert_loc, ) @@ -1007,31 +1006,6 @@ def _pad_or_backfill( [<NA>, 2, 2, 3, <NA>, <NA>] Length: 6, dtype: Int64 """ - - # If a 3rd-party EA has implemented this functionality in fillna, - # we warn that they need to implement _pad_or_backfill instead. - if ( - type(self).fillna is not ExtensionArray.fillna - and type(self)._pad_or_backfill is ExtensionArray._pad_or_backfill - ): - # Check for _pad_or_backfill here allows us to call - # super()._pad_or_backfill without getting this warning - warnings.warn( - "ExtensionArray.fillna 'method' keyword is deprecated. " - "In a future version. arr._pad_or_backfill will be called " - "instead. 3rd-party ExtensionArray authors need to implement " - "_pad_or_backfill.", - DeprecationWarning, - stacklevel=find_stack_level(), - ) - if limit_area is not None: - raise NotImplementedError( - f"{type(self).__name__} does not implement limit_area " - "(added in pandas 2.2). 3rd-party ExtnsionArray authors " - "need to add this argument to _pad_or_backfill." - ) - return self.fillna(method=method, limit=limit) - mask = self.isna() if mask.any(): @@ -1057,8 +1031,7 @@ def _pad_or_backfill( def fillna( self, - value: object | ArrayLike | None = None, - method: FillnaOptions | None = None, + value: object | ArrayLike, limit: int | None = None, copy: bool = True, ) -> Self: @@ -1071,14 +1044,6 @@ def fillna( If a scalar value is passed it is used to fill all missing values. Alternatively, an array-like "value" can be given. It's expected that the array-like have the same length as 'self'. 
- method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed Series: - - * pad / ffill: propagate last valid observation forward to next valid. - * backfill / bfill: use NEXT valid observation to fill gap. - - .. deprecated:: 2.1.0 - limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is @@ -1086,9 +1051,6 @@ def fillna( be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. - - .. deprecated:: 2.1.0 - copy : bool, default True Whether to make a copy of the data before filling. If False, then the original should be modified and no new memory should be allocated. @@ -1110,16 +1072,6 @@ def fillna( [0, 0, 2, 3, 0, 0] Length: 6, dtype: Int64 """ - if method is not None: - warnings.warn( - f"The 'method' keyword in {type(self).__name__}.fillna is " - "deprecated and will be removed in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) - - value, method = validate_fillna_kwargs(value, method) - mask = self.isna() # error: Argument 2 to "check_value_size" has incompatible type # "ExtensionArray"; expected "ndarray" @@ -1130,24 +1082,12 @@ def fillna( ) if mask.any(): - if method is not None: - meth = missing.clean_fill_method(method) - - npmask = np.asarray(mask) - if meth == "pad": - indexer = libalgos.get_fill_indexer(npmask, limit=limit) - return self.take(indexer, allow_fill=True) - else: - # i.e. 
meth == "backfill" - indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1] - return self[::-1].take(indexer, allow_fill=True) + # fill with value + if not copy: + new_values = self[:] else: - # fill with value - if not copy: - new_values = self[:] - else: - new_values = self.copy() - new_values[mask] = value + new_values = self.copy() + new_values[mask] = value else: if not copy: new_values = self[:] diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 1ea32584403ba..56ea28c0b50f8 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -29,7 +29,6 @@ ArrayLike, AxisInt, Dtype, - FillnaOptions, IntervalClosedType, NpDtype, PositionalIndexer, @@ -894,23 +893,7 @@ def max(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOr indexer = obj.argsort()[-1] return obj[indexer] - def _pad_or_backfill( # pylint: disable=useless-parent-delegation - self, - *, - method: FillnaOptions, - limit: int | None = None, - limit_area: Literal["inside", "outside"] | None = None, - copy: bool = True, - ) -> Self: - # TODO(3.0): after EA.fillna 'method' deprecation is enforced, we can remove - # this method entirely. - return super()._pad_or_backfill( - method=method, limit=limit, limit_area=limit_area, copy=copy - ) - - def fillna( - self, value=None, method=None, limit: int | None = None, copy: bool = True - ) -> Self: + def fillna(self, value=None, limit: int | None = None, copy: bool = True) -> Self: """ Fill NA/NaN values using the specified method. @@ -921,9 +904,6 @@ def fillna( Alternatively, a Series or dict can be used to fill in different values for each index. The value should not be a list. The value(s) passed should be either Interval objects or NA/NaN. 
- method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - (Not implemented yet for IntervalArray) - Method to use for filling holes in reindexed Series limit : int, default None (Not implemented yet for IntervalArray) If method is specified, this is the maximum number of consecutive @@ -944,8 +924,6 @@ def fillna( """ if copy is False: raise NotImplementedError - if method is not None: - return super().fillna(value=value, method=method, limit=limit) value_left, value_right = self._validate_scalar(value) diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 108202f5e510b..d20d7f98b8aa8 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -38,7 +38,6 @@ ) from pandas.errors import AbstractMethodError from pandas.util._decorators import doc -from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.common import ( @@ -237,32 +236,18 @@ def _pad_or_backfill( return new_values @doc(ExtensionArray.fillna) - def fillna( - self, value=None, method=None, limit: int | None = None, copy: bool = True - ) -> Self: - value, method = validate_fillna_kwargs(value, method) - + def fillna(self, value=None, limit: int | None = None, copy: bool = True) -> Self: mask = self._mask value = missing.check_value_size(value, mask, len(self)) if mask.any(): - if method is not None: - func = missing.get_fill_func(method, ndim=self.ndim) - npvalues = self._data.T - new_mask = mask.T - if copy: - npvalues = npvalues.copy() - new_mask = new_mask.copy() - func(npvalues, limit=limit, mask=new_mask) - return self._simple_new(npvalues.T, new_mask.T) + # fill with value + if copy: + new_values = self.copy() else: - # fill with value - if copy: - new_values = self.copy() - else: - new_values = self[:] - new_values[mask] = value + new_values = self[:] + new_values[mask] = value else: if copy: new_values = self.copy() diff --git a/pandas/core/arrays/period.py 
b/pandas/core/arrays/period.py index d05f857f46179..e73eba710ec39 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -847,19 +847,6 @@ def _pad_or_backfill( else: return self - def fillna( - self, value=None, method=None, limit: int | None = None, copy: bool = True - ) -> Self: - if method is not None: - # view as dt64 so we get treated as timelike in core.missing, - # similar to dtl._period_dispatch - dta = self.view("M8[ns]") - result = dta.fillna(value=value, method=method, limit=limit, copy=copy) - # error: Incompatible return value type (got "Union[ExtensionArray, - # ndarray[Any, Any]]", expected "PeriodArray") - return result.view(self.dtype) # type: ignore[return-value] - return super().fillna(value=value, method=method, limit=limit, copy=copy) - # ------------------------------------------------------------------ # Arithmetic Methods diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index bf44e5e099530..bdcb3219a9875 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -98,10 +98,7 @@ class ellipsis(Enum): from scipy.sparse import spmatrix - from pandas._typing import ( - FillnaOptions, - NumpySorter, - ) + from pandas._typing import NumpySorter SparseIndexKind = Literal["integer", "block"] @@ -717,24 +714,9 @@ def isna(self) -> Self: # type: ignore[override] mask[self.sp_index.indices] = isna(self.sp_values) return type(self)(mask, fill_value=False, dtype=dtype) - def _pad_or_backfill( # pylint: disable=useless-parent-delegation - self, - *, - method: FillnaOptions, - limit: int | None = None, - limit_area: Literal["inside", "outside"] | None = None, - copy: bool = True, - ) -> Self: - # TODO(3.0): We can remove this method once deprecation for fillna method - # keyword is enforced. 
- return super()._pad_or_backfill( - method=method, limit=limit, limit_area=limit_area, copy=copy - ) - def fillna( self, value=None, - method: FillnaOptions | None = None, limit: int | None = None, copy: bool = True, ) -> Self: @@ -743,17 +725,8 @@ def fillna( Parameters ---------- - value : scalar, optional - method : str, optional - - .. warning:: - - Using 'method' will result in high memory use, - as all `fill_value` methods will be converted to - an in-memory ndarray - + value : scalar limit : int, optional - copy: bool, default True Ignored for SparseArray. @@ -773,22 +746,15 @@ def fillna( When ``self.fill_value`` is not NA, the result dtype will be ``self.dtype``. Again, this preserves the amount of memory used. """ - if (method is None and value is None) or ( - method is not None and value is not None - ): - raise ValueError("Must specify one of 'method' or 'value'.") - - if method is not None: - return super().fillna(method=method, limit=limit) + if value is None: + raise ValueError("Must specify 'value'.") + new_values = np.where(isna(self.sp_values), value, self.sp_values) + if self._null_fill_value: + # This is essentially just updating the dtype. + new_dtype = SparseDtype(self.dtype.subtype, fill_value=value) else: - new_values = np.where(isna(self.sp_values), value, self.sp_values) - - if self._null_fill_value: - # This is essentially just updating the dtype. 
- new_dtype = SparseDtype(self.dtype.subtype, fill_value=value) - else: - new_dtype = self.dtype + new_dtype = self.dtype return self._simple_new(new_values, self._sparse_index, new_dtype) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c0eda7f022d8f..f7607820180c3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -99,7 +99,6 @@ check_dtype_backend, validate_ascending, validate_bool_kwarg, - validate_fillna_kwargs, validate_inclusive, ) @@ -9578,7 +9577,6 @@ def _align_series( # fill fill_na = notna(fill_value) if fill_na: - fill_value, _ = validate_fillna_kwargs(fill_value, None) left = left.fillna(fill_value) right = right.fillna(fill_value) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index f6bf5dffb5f48..a7cdc7c39754d 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1873,13 +1873,11 @@ def fillna( copy, refs = self._get_refs_and_copy(inplace) try: - new_values = self.values.fillna( - value=value, method=None, limit=limit, copy=copy - ) + new_values = self.values.fillna(value=value, limit=limit, copy=copy) except TypeError: # 3rd party EA that has not implemented copy keyword yet refs = None - new_values = self.values.fillna(value=value, method=None, limit=limit) + new_values = self.values.fillna(value=value, limit=limit) # issue the warning *after* retrying, in case the TypeError # was caused by an invalid fill_value warnings.warn( diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py index 332d31e9e3fc2..9d4b78ce9944e 100644 --- a/pandas/tests/arrays/categorical/test_missing.py +++ b/pandas/tests/arrays/categorical/test_missing.py @@ -62,34 +62,6 @@ def test_set_item_nan(self): exp = Categorical([1, np.nan, 3], categories=[1, 2, 3]) tm.assert_categorical_equal(cat, exp) - @pytest.mark.parametrize( - "fillna_kwargs, msg", - [ - ( - {"value": 1, "method": "ffill"}, - "Cannot specify both 'value' and 
'method'.", - ), - ({}, "Must specify a fill 'value' or 'method'."), - ({"method": "bad"}, "Invalid fill method. Expecting .* bad"), - ( - {"value": Series([1, 2, 3, 4, "a"])}, - "Cannot setitem on a Categorical with a new category", - ), - ], - ) - def test_fillna_raises(self, fillna_kwargs, msg): - # https://github.com/pandas-dev/pandas/issues/19682 - # https://github.com/pandas-dev/pandas/issues/13628 - cat = Categorical([1, 2, 3, None, None]) - - if len(fillna_kwargs) == 1 and "value" in fillna_kwargs: - err = TypeError - else: - err = ValueError - - with pytest.raises(err, match=msg): - cat.fillna(**fillna_kwargs) - @pytest.mark.parametrize("named", [True, False]) def test_fillna_iterable_category(self, named): # https://github.com/pandas-dev/pandas/issues/21097 diff --git a/pandas/tests/extension/conftest.py b/pandas/tests/extension/conftest.py index 5ae0864190f10..97fb5a0bc5066 100644 --- a/pandas/tests/extension/conftest.py +++ b/pandas/tests/extension/conftest.py @@ -189,7 +189,7 @@ def use_numpy(request): def fillna_method(request): """ Parametrized fixture giving method parameters 'ffill' and 'bfill' for - Series.fillna(method=<method>) testing. + Series.<method> testing. """ return request.param diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 709cff59cd824..59f313b4c9edb 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -287,17 +287,10 @@ def value_counts(self, dropna: bool = True): return value_counts(self.to_numpy(), dropna=dropna) # We override fillna here to simulate a 3rd party EA that has done so. This - # lets us test the deprecation telling authors to implement _pad_or_backfill - # Simulate a 3rd-party EA that has not yet updated to include a "copy" + # lets us test a 3rd-party EA that has not yet updated to include a "copy" # keyword in its fillna method. 
- # error: Signature of "fillna" incompatible with supertype "ExtensionArray" - def fillna( # type: ignore[override] - self, - value=None, - method=None, - limit: int | None = None, - ): - return super().fillna(value=value, method=method, limit=limit, copy=True) + def fillna(self, value=None, limit=None): + return super().fillna(value=value, limit=limit, copy=True) def to_decimal(values, context=None): diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index bed3ec62f43da..a2721908e858f 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -137,86 +137,6 @@ def test_fillna_frame(self, data_missing): ): super().test_fillna_frame(data_missing) - def test_fillna_limit_pad(self, data_missing): - msg = "ExtensionArray.fillna 'method' keyword is deprecated" - with tm.assert_produces_warning( - DeprecationWarning, - match=msg, - check_stacklevel=False, - raise_on_extra_warnings=False, - ): - super().test_fillna_limit_pad(data_missing) - - msg = "The 'method' keyword in DecimalArray.fillna is deprecated" - with tm.assert_produces_warning( - FutureWarning, - match=msg, - check_stacklevel=False, - raise_on_extra_warnings=False, - ): - super().test_fillna_limit_pad(data_missing) - - @pytest.mark.parametrize( - "limit_area, input_ilocs, expected_ilocs", - [ - ("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]), - ("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]), - ("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]), - ("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]), - ("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]), - ("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]), - ("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]), - ("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]), - ], - ) - def test_ffill_limit_area( - self, data_missing, limit_area, input_ilocs, expected_ilocs - ): - # GH#56616 - msg = "ExtensionArray.fillna 'method' keyword is deprecated" - with tm.assert_produces_warning( - 
DeprecationWarning, - match=msg, - check_stacklevel=False, - raise_on_extra_warnings=False, - ): - msg = "DecimalArray does not implement limit_area" - with pytest.raises(NotImplementedError, match=msg): - super().test_ffill_limit_area( - data_missing, limit_area, input_ilocs, expected_ilocs - ) - - def test_fillna_limit_backfill(self, data_missing): - msg = "ExtensionArray.fillna 'method' keyword is deprecated" - with tm.assert_produces_warning( - DeprecationWarning, - match=msg, - check_stacklevel=False, - raise_on_extra_warnings=False, - ): - super().test_fillna_limit_backfill(data_missing) - - msg = "The 'method' keyword in DecimalArray.fillna is deprecated" - with tm.assert_produces_warning( - FutureWarning, - match=msg, - check_stacklevel=False, - raise_on_extra_warnings=False, - ): - super().test_fillna_limit_backfill(data_missing) - - def test_fillna_no_op_returns_copy(self, data): - msg = "|".join( - [ - "ExtensionArray.fillna 'method' keyword is deprecated", - "The 'method' keyword in DecimalArray.fillna is deprecated", - ] - ) - with tm.assert_produces_warning( - (FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False - ): - super().test_fillna_no_op_returns_copy(data) - def test_fillna_series(self, data_missing): msg = "ExtensionArray.fillna added a 'copy' keyword" with tm.assert_produces_warning( @@ -224,18 +144,6 @@ def test_fillna_series(self, data_missing): ): super().test_fillna_series(data_missing) - def test_fillna_series_method(self, data_missing, fillna_method): - msg = "|".join( - [ - "ExtensionArray.fillna 'method' keyword is deprecated", - "The 'method' keyword in DecimalArray.fillna is deprecated", - ] - ) - with tm.assert_produces_warning( - (FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False - ): - super().test_fillna_series_method(data_missing, fillna_method) - @pytest.mark.parametrize("dropna", [True, False]) def test_value_counts(self, all_data, dropna): all_data = all_data[:10] diff --git 
a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 11a9f4f22167f..9b2251d0b7d4a 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -706,10 +706,6 @@ def test_fillna_no_op_returns_copy(self, data): assert result is not data tm.assert_extension_array_equal(result, data) - result = data.fillna(method="backfill") - assert result is not data - tm.assert_extension_array_equal(result, data) - @pytest.mark.xfail( reason="GH 45419: pyarrow.ChunkedArray does not support views", run=False ) diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py index c09d4d315451f..49ad3fce92a5c 100644 --- a/pandas/tests/extension/test_string.py +++ b/pandas/tests/extension/test_string.py @@ -136,10 +136,6 @@ def test_fillna_no_op_returns_copy(self, data): assert result is not data tm.assert_extension_array_equal(result, data) - result = data.fillna(method="backfill") - assert result is not data - tm.assert_extension_array_equal(result, data) - def _get_expected_exception( self, op_name: str, obj, other ) -> type[Exception] | None:
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Ref: #53621 Ran into a little trouble here - `NDFrame.fillna` has a `limit` argument that wasn't deprecated and works, but `EA.fillna` has this deprecated and is currently ignored. Currently this PR keeps the `limit` argument in `EA.fillna` and removes the deprecation, but the implementation still ignores it. If we want to keep it, I plan on doing a followup implementing `limit` across all EAs (I think this should be straightforward except for maybe Sparse - not sure). As an alternative, we could also deprecate `limit` on `NDFrame.fillna` (not my preference), and then I can keep but not enforce the deprecation on `EA.fillna` here. cc @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/57983
2024-03-24T14:03:06Z
2024-03-25T17:38:07Z
2024-03-25T17:38:07Z
2024-03-25T20:35:50Z
DOC Add documentation for how pandas rounds values in Series.round and Dataframe.round methods
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5d10a5541f556..2222164da90c7 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -10703,6 +10703,12 @@ def round( numpy.around : Round a numpy array to the given number of decimals. Series.round : Round a Series to the given number of decimals. + Notes + ----- + For values exactly halfway between rounded decimal values, pandas rounds + to the nearest even value (e.g. -0.5 and 0.5 round to 0.0, 1.5 and 2.5 + round to 2.0, etc.). + Examples -------- >>> df = pd.DataFrame( diff --git a/pandas/core/series.py b/pandas/core/series.py index 08e56cb4925b3..0be7a0a7aaa82 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2509,13 +2509,21 @@ def round(self, decimals: int = 0, *args, **kwargs) -> Series: numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. + Notes + ----- + For values exactly halfway between rounded decimal values, pandas rounds + to the nearest even value (e.g. -0.5 and 0.5 round to 0.0, 1.5 and 2.5 + round to 2.0, etc.). + Examples -------- - >>> s = pd.Series([0.1, 1.3, 2.7]) + >>> s = pd.Series([-0.5, 0.1, 2.5, 1.3, 2.7]) >>> s.round() - 0 0.0 - 1 1.0 - 2 3.0 + 0 -0.0 + 1 0.0 + 2 2.0 + 3 1.0 + 4 3.0 dtype: float64 """ nv.validate_round(args, kwargs)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Description: In the pandas documentation it is not documented how numbers are rounded if values are exactly halfway between rounded decimal values (they are rounded to the nearest even value). I added the documentation for this behaviour.
https://api.github.com/repos/pandas-dev/pandas/pulls/57981
2024-03-24T11:59:13Z
2024-03-24T22:13:41Z
2024-03-24T22:13:41Z
2024-03-25T08:05:26Z
DOC: clarify three documentation strings in base.py
diff --git a/pandas/core/base.py b/pandas/core/base.py index 987136ffdff7d..d43222f1acd11 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1065,7 +1065,7 @@ def nunique(self, dropna: bool = True) -> int: @property def is_unique(self) -> bool: """ - Return boolean if values in the object are unique. + Return True if values in the object are unique. Returns ------- @@ -1086,7 +1086,7 @@ def is_unique(self) -> bool: @property def is_monotonic_increasing(self) -> bool: """ - Return boolean if values in the object are monotonically increasing. + Return True if values in the object are monotonically increasing. Returns ------- @@ -1109,7 +1109,7 @@ def is_monotonic_increasing(self) -> bool: @property def is_monotonic_decreasing(self) -> bool: """ - Return boolean if values in the object are monotonically decreasing. + Return True if values in the object are monotonically decreasing. Returns -------
This change is consistent with other documentation strings in this file.
https://api.github.com/repos/pandas-dev/pandas/pulls/57978
2024-03-23T20:42:18Z
2024-03-25T17:43:31Z
2024-03-25T17:43:31Z
2024-03-25T17:43:44Z
PDEP: Change status of CoW proposal to implemented
diff --git a/web/pandas/pdeps/0007-copy-on-write.md b/web/pandas/pdeps/0007-copy-on-write.md index e45fbaf555bc1..f5adb6a571120 100644 --- a/web/pandas/pdeps/0007-copy-on-write.md +++ b/web/pandas/pdeps/0007-copy-on-write.md @@ -1,7 +1,7 @@ # PDEP-7: Consistent copy/view semantics in pandas with Copy-on-Write - Created: July 2021 -- Status: Accepted +- Status: Implemented - Discussion: [#36195](https://github.com/pandas-dev/pandas/issues/36195) - Author: [Joris Van den Bossche](https://github.com/jorisvandenbossche) - Revision: 1
We already enabled CoW by default and removed the legacy mode
https://api.github.com/repos/pandas-dev/pandas/pulls/57977
2024-03-23T16:58:11Z
2024-03-25T17:41:01Z
2024-03-25T17:41:01Z
2024-03-25T21:32:57Z
DOC: fix list indentation in pandas.DataFrame.stack
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5d10a5541f556..8fb400872378c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9288,10 +9288,9 @@ def stack( DataFrame. The new inner-most levels are created by pivoting the columns of the current dataframe: - - if the columns have a single level, the output is a Series; - - if the columns have multiple levels, the new index - level(s) is (are) taken from the prescribed level(s) and - the output is a DataFrame. + - if the columns have a single level, the output is a Series; + - if the columns have multiple levels, the new index level(s) is (are) + taken from the prescribed level(s) and the output is a DataFrame. Parameters ----------
Fixed the grey-out box around the list of options for multiple levels of columns.
https://api.github.com/repos/pandas-dev/pandas/pulls/57975
2024-03-23T16:38:04Z
2024-03-25T11:40:22Z
2024-03-25T11:40:22Z
2024-03-25T11:40:22Z
BUG: Fixed ADBC to_sql creation of table when using public schema
diff --git a/doc/source/whatsnew/v2.2.2.rst b/doc/source/whatsnew/v2.2.2.rst index d0f8951ac07ad..9e1a883d47cf8 100644 --- a/doc/source/whatsnew/v2.2.2.rst +++ b/doc/source/whatsnew/v2.2.2.rst @@ -25,6 +25,7 @@ Bug fixes - :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the column's type was nullable boolean (:issue:`55332`) - :meth:`DataFrame.__dataframe__` was showing bytemask instead of bitmask for ``'string[pyarrow]'`` validity buffer (:issue:`57762`) - :meth:`DataFrame.__dataframe__` was showing non-null validity buffer (instead of ``None``) ``'string[pyarrow]'`` without missing values (:issue:`57761`) +- :meth:`DataFrame.to_sql` was failing to find the right table when using the schema argument (:issue:`57539`) .. --------------------------------------------------------------------------- .. _whatsnew_222.other: diff --git a/pandas/io/sql.py b/pandas/io/sql.py index b80487abbc4ab..aa9d0d88ae69a 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -2380,7 +2380,9 @@ def to_sql( raise ValueError("datatypes not supported") from exc with self.con.cursor() as cur: - total_inserted = cur.adbc_ingest(table_name, tbl, mode=mode) + total_inserted = cur.adbc_ingest( + table_name=name, data=tbl, mode=mode, db_schema_name=schema + ) self.con.commit() return total_inserted diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index c8f4d68230e5b..67b1311a5a798 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -1373,6 +1373,30 @@ def insert_on_conflict(table, conn, keys, data_iter): pandasSQL.drop_table("test_insert_conflict") +@pytest.mark.parametrize("conn", all_connectable) +def test_to_sql_on_public_schema(conn, request): + if "sqlite" in conn or "mysql" in conn: + request.applymarker( + pytest.mark.xfail( + reason="test for public schema only specific to postgresql" + ) + ) + + conn = request.getfixturevalue(conn) + + test_data = DataFrame([[1, 2.1, "a"], [2, 3.1, "b"]], columns=list("abc")) + 
test_data.to_sql( + name="test_public_schema", + con=conn, + if_exists="append", + index=False, + schema="public", + ) + + df_out = sql.read_sql_table("test_public_schema", conn, schema="public") + tm.assert_frame_equal(test_data, df_out) + + @pytest.mark.parametrize("conn", mysql_connectable) def test_insertion_method_on_conflict_update(conn, request): # GH 14553: Example in to_sql docstring
Problem: table on public schema being lost when tried to be created Solution: used db_schema_name argument to specify schema name of adbc_ingest - [x] closes #57539 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57974
2024-03-23T16:19:24Z
2024-03-28T17:55:54Z
2024-03-28T17:55:54Z
2024-03-28T17:56:04Z
Changed the strings to make code simpler
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 1716110b619d6..69697906e493e 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -88,7 +88,7 @@ def setup(self): ) for col in ("int", "float", "timestamp"): - self.df[col + "_as_str"] = self.df[col].astype(str) + self.df[f"{col}_as_str"] = self.df[col].astype(str) for col in self.df.columns: self.df[col] = self.df[col].astype("category")
Changed the strings to make code simpler - [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/57973
2024-03-23T16:10:53Z
2024-03-25T17:44:31Z
2024-03-25T17:44:31Z
2024-03-25T17:44:38Z
CLN: Enforce deprecation of argmin/max and idxmin/max with NA values
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index f225d384888e3..c5d032f9dace5 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -259,6 +259,7 @@ Removal of prior version deprecations/changes - Removed the :class:`Grouper` attributes ``ax``, ``groups``, ``indexer``, and ``obj`` (:issue:`51206`, :issue:`51182`) - Removed deprecated keyword ``verbose`` on :func:`read_csv` and :func:`read_table` (:issue:`56556`) - Removed the attribute ``dtypes`` from :class:`.DataFrameGroupBy` (:issue:`51997`) +- Enforced deprecation of ``argmin``, ``argmax``, ``idxmin``, and ``idxmax`` returning a result when ``skipna=False`` and an NA value is encountered or all values are NA values; these operations will now raise in such cases (:issue:`33941`, :issue:`51276`) .. --------------------------------------------------------------------------- .. _whatsnew_300.performance: diff --git a/pandas/core/base.py b/pandas/core/base.py index 987136ffdff7d..80919130cab63 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -14,7 +14,6 @@ final, overload, ) -import warnings import numpy as np @@ -35,7 +34,6 @@ cache_readonly, doc, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import can_hold_element from pandas.core.dtypes.common import ( @@ -686,7 +684,8 @@ def argmax( axis : {{None}} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True - Exclude NA/null values when showing the result. + Exclude NA/null values. If the entire Series is NA, or if ``skipna=False`` + and there is an NA value, this method will raise a ``ValueError``. *args, **kwargs Additional arguments and keywords for compatibility with NumPy. 
@@ -736,28 +735,15 @@ def argmax( nv.validate_minmax_axis(axis) skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) + if skipna and len(delegate) > 0 and isna(delegate).all(): + raise ValueError("Encountered all NA values") + elif not skipna and isna(delegate).any(): + raise ValueError("Encountered an NA value with skipna=False") + if isinstance(delegate, ExtensionArray): - if not skipna and delegate.isna().any(): - warnings.warn( - f"The behavior of {type(self).__name__}.argmax/argmin " - "with skipna=False and NAs, or with all-NAs is deprecated. " - "In a future version this will raise ValueError.", - FutureWarning, - stacklevel=find_stack_level(), - ) - return -1 - else: - return delegate.argmax() + return delegate.argmax() else: result = nanops.nanargmax(delegate, skipna=skipna) - if result == -1: - warnings.warn( - f"The behavior of {type(self).__name__}.argmax/argmin " - "with skipna=False and NAs, or with all-NAs is deprecated. " - "In a future version this will raise ValueError.", - FutureWarning, - stacklevel=find_stack_level(), - ) # error: Incompatible return value type (got "Union[int, ndarray]", expected # "int") return result # type: ignore[return-value] @@ -770,28 +756,15 @@ def argmin( nv.validate_minmax_axis(axis) skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) + if skipna and len(delegate) > 0 and isna(delegate).all(): + raise ValueError("Encountered all NA values") + elif not skipna and isna(delegate).any(): + raise ValueError("Encountered an NA value with skipna=False") + if isinstance(delegate, ExtensionArray): - if not skipna and delegate.isna().any(): - warnings.warn( - f"The behavior of {type(self).__name__}.argmax/argmin " - "with skipna=False and NAs, or with all-NAs is deprecated. 
" - "In a future version this will raise ValueError.", - FutureWarning, - stacklevel=find_stack_level(), - ) - return -1 - else: - return delegate.argmin() + return delegate.argmin() else: result = nanops.nanargmin(delegate, skipna=skipna) - if result == -1: - warnings.warn( - f"The behavior of {type(self).__name__}.argmax/argmin " - "with skipna=False and NAs, or with all-NAs is deprecated. " - "In a future version this will raise ValueError.", - FutureWarning, - stacklevel=find_stack_level(), - ) # error: Incompatible return value type (got "Union[int, ndarray]", expected # "int") return result # type: ignore[return-value] diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 9a537c71f3cd0..3cb37e037ecd3 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6976,16 +6976,10 @@ def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: if not self._is_multi and self.hasnans: # Take advantage of cache - mask = self._isnan - if not skipna or mask.all(): - warnings.warn( - f"The behavior of {type(self).__name__}.argmax/argmin " - "with skipna=False and NAs, or with all-NAs is deprecated. " - "In a future version this will raise ValueError.", - FutureWarning, - stacklevel=find_stack_level(), - ) - return -1 + if self._isnan.all(): + raise ValueError("Encountered all NA values") + elif not skipna: + raise ValueError("Encountered an NA value with skipna=False") return super().argmin(skipna=skipna) @Appender(IndexOpsMixin.argmax.__doc__) @@ -6995,16 +6989,10 @@ def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: if not self._is_multi and self.hasnans: # Take advantage of cache - mask = self._isnan - if not skipna or mask.all(): - warnings.warn( - f"The behavior of {type(self).__name__}.argmax/argmin " - "with skipna=False and NAs, or with all-NAs is deprecated. 
" - "In a future version this will raise ValueError.", - FutureWarning, - stacklevel=find_stack_level(), - ) - return -1 + if self._isnan.all(): + raise ValueError("Encountered all NA values") + elif not skipna: + raise ValueError("Encountered an NA value with skipna=False") return super().argmax(skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 6cb825e9b79a2..b68337d9e0de9 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1441,17 +1441,18 @@ def _maybe_arg_null_out( if axis is None or not getattr(result, "ndim", False): if skipna: if mask.all(): - return -1 + raise ValueError("Encountered all NA values") else: if mask.any(): - return -1 + raise ValueError("Encountered an NA value with skipna=False") else: - if skipna: - na_mask = mask.all(axis) - else: - na_mask = mask.any(axis) + na_mask = mask.all(axis) if na_mask.any(): - result[na_mask] = -1 + raise ValueError("Encountered all NA values") + elif not skipna: + na_mask = mask.any(axis) + if na_mask.any(): + raise ValueError("Encountered an NA value with skipna=False") return result diff --git a/pandas/core/series.py b/pandas/core/series.py index 08e56cb4925b3..4b0eb4e1f7358 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2333,8 +2333,8 @@ def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashab axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True - Exclude NA/null values. If the entire Series is NA, the result - will be NA. + Exclude NA/null values. If the entire Series is NA, or if ``skipna=False`` + and there is an NA value, this method will raise a ``ValueError``. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. 
@@ -2376,32 +2376,10 @@ def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashab >>> s.idxmin() 'A' - - If `skipna` is False and there is an NA value in the data, - the function returns ``nan``. - - >>> s.idxmin(skipna=False) - nan """ axis = self._get_axis_number(axis) - with warnings.catch_warnings(): - # TODO(3.0): this catching/filtering can be removed - # ignore warning produced by argmin since we will issue a different - # warning for idxmin - warnings.simplefilter("ignore") - i = self.argmin(axis, skipna, *args, **kwargs) - - if i == -1: - # GH#43587 give correct NA value for Index. - warnings.warn( - f"The behavior of {type(self).__name__}.idxmin with all-NA " - "values, or any-NA and skipna=False, is deprecated. In a future " - "version this will raise ValueError", - FutureWarning, - stacklevel=find_stack_level(), - ) - return self.index._na_value - return self.index[i] + iloc = self.argmin(axis, skipna, *args, **kwargs) + return self.index[iloc] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ @@ -2415,8 +2393,8 @@ def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashab axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True - Exclude NA/null values. If the entire Series is NA, the result - will be NA. + Exclude NA/null values. If the entire Series is NA, or if ``skipna=False`` + and there is an NA value, this method will raise a ``ValueError``. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. @@ -2459,32 +2437,10 @@ def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashab >>> s.idxmax() 'C' - - If `skipna` is False and there is an NA value in the data, - the function returns ``nan``. 
- - >>> s.idxmax(skipna=False) - nan """ axis = self._get_axis_number(axis) - with warnings.catch_warnings(): - # TODO(3.0): this catching/filtering can be removed - # ignore warning produced by argmax since we will issue a different - # warning for argmax - warnings.simplefilter("ignore") - i = self.argmax(axis, skipna, *args, **kwargs) - - if i == -1: - # GH#43587 give correct NA value for Index. - warnings.warn( - f"The behavior of {type(self).__name__}.idxmax with all-NA " - "values, or any-NA and skipna=False, is deprecated. In a future " - "version this will raise ValueError", - FutureWarning, - stacklevel=find_stack_level(), - ) - return self.index._na_value - return self.index[i] + iloc = self.argmax(axis, skipna, *args, **kwargs) + return self.index[iloc] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 15aa210a09d6d..a2b5439f9e12f 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -692,8 +692,8 @@ axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. skipna : bool, default True - Exclude NA/null values. If an entire row/column is NA, the result - will be NA. + Exclude NA/null values. If the entire Series is NA, or if ``skipna=False`` + and there is an NA value, this method will raise a ``ValueError``. numeric_only : bool, default {numeric_only_default} Include only `float`, `int` or `boolean` data. @@ -757,8 +757,8 @@ axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. skipna : bool, default True - Exclude NA/null values. If an entire row/column is NA, the result - will be NA. + Exclude NA/null values. If the entire Series is NA, or if ``skipna=False`` + and there is an NA value, this method will raise a ``ValueError``. 
numeric_only : bool, default {numeric_only_default} Include only `float`, `int` or `boolean` data. diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index c803a8113b4a4..26638c6160b7b 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -169,8 +169,8 @@ def test_argmin_argmax_all_na(self, method, data, na_value): ("idxmin", True, 2), ("argmax", True, 0), ("argmin", True, 2), - ("idxmax", False, np.nan), - ("idxmin", False, np.nan), + ("idxmax", False, -1), + ("idxmin", False, -1), ("argmax", False, -1), ("argmin", False, -1), ], @@ -179,17 +179,13 @@ def test_argreduce_series( self, data_missing_for_sorting, op_name, skipna, expected ): # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing. - warn = None - msg = "The behavior of Series.argmax/argmin" - if op_name.startswith("arg") and expected == -1: - warn = FutureWarning - if op_name.startswith("idx") and np.isnan(expected): - warn = FutureWarning - msg = f"The behavior of Series.{op_name}" ser = pd.Series(data_missing_for_sorting) - with tm.assert_produces_warning(warn, match=msg): + if expected == -1: + with pytest.raises(ValueError, match="Encountered an NA value"): + getattr(ser, op_name)(skipna=skipna) + else: result = getattr(ser, op_name)(skipna=skipna) - tm.assert_almost_equal(result, expected) + tm.assert_almost_equal(result, expected) def test_argmax_argmin_no_skipna_notimplemented(self, data_missing_for_sorting): # GH#38733 diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 63c15fab76562..408cb0ab6fc5c 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1065,18 +1065,20 @@ def test_idxmin(self, float_frame, int_frame, skipna, axis): frame.iloc[5:10] = np.nan frame.iloc[15:20, -2:] = np.nan for df in [frame, int_frame]: - warn = None - if skipna is False or axis == 1: - warn = None if df is int_frame else 
FutureWarning - msg = "The behavior of DataFrame.idxmin with all-NA values" - with tm.assert_produces_warning(warn, match=msg): + if (not skipna or axis == 1) and df is not int_frame: + if axis == 1: + msg = "Encountered all NA values" + else: + msg = "Encountered an NA value" + with pytest.raises(ValueError, match=msg): + df.idxmin(axis=axis, skipna=skipna) + with pytest.raises(ValueError, match=msg): + df.idxmin(axis=axis, skipna=skipna) + else: result = df.idxmin(axis=axis, skipna=skipna) - - msg2 = "The behavior of Series.idxmin" - with tm.assert_produces_warning(warn, match=msg2): expected = df.apply(Series.idxmin, axis=axis, skipna=skipna) - expected = expected.astype(df.index.dtype) - tm.assert_series_equal(result, expected) + expected = expected.astype(df.index.dtype) + tm.assert_series_equal(result, expected) @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") @@ -1113,16 +1115,17 @@ def test_idxmax(self, float_frame, int_frame, skipna, axis): frame.iloc[5:10] = np.nan frame.iloc[15:20, -2:] = np.nan for df in [frame, int_frame]: - warn = None - if skipna is False or axis == 1: - warn = None if df is int_frame else FutureWarning - msg = "The behavior of DataFrame.idxmax with all-NA values" - with tm.assert_produces_warning(warn, match=msg): - result = df.idxmax(axis=axis, skipna=skipna) + if (skipna is False or axis == 1) and df is frame: + if axis == 1: + msg = "Encountered all NA values" + else: + msg = "Encountered an NA value" + with pytest.raises(ValueError, match=msg): + df.idxmax(axis=axis, skipna=skipna) + return - msg2 = "The behavior of Series.idxmax" - with tm.assert_produces_warning(warn, match=msg2): - expected = df.apply(Series.idxmax, axis=axis, skipna=skipna) + result = df.idxmax(axis=axis, skipna=skipna) + expected = df.apply(Series.idxmax, axis=axis, skipna=skipna) expected = expected.astype(df.index.dtype) tm.assert_series_equal(result, expected) @@ -2118,15 
+2121,16 @@ def test_numeric_ea_axis_1(method, skipna, min_count, any_numeric_ea_dtype): if method in ("prod", "product", "sum"): kwargs["min_count"] = min_count - warn = None - msg = None if not skipna and method in ("idxmax", "idxmin"): - warn = FutureWarning + # GH#57745 - EAs use groupby for axis=1 which still needs a proper deprecation. msg = f"The behavior of DataFrame.{method} with all-NA values" - with tm.assert_produces_warning(warn, match=msg): - result = getattr(df, method)(axis=1, **kwargs) - with tm.assert_produces_warning(warn, match=msg): - expected = getattr(expected_df, method)(axis=1, **kwargs) + with tm.assert_produces_warning(FutureWarning, match=msg): + getattr(df, method)(axis=1, **kwargs) + with pytest.raises(ValueError, match="Encountered an NA value"): + getattr(expected_df, method)(axis=1, **kwargs) + return + result = getattr(df, method)(axis=1, **kwargs) + expected = getattr(expected_df, method)(axis=1, **kwargs) if method not in ("idxmax", "idxmin"): expected = expected.astype(expected_dtype) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index 91ee13ecd87dd..b10319f5380e7 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -128,28 +128,14 @@ def test_nanargminmax(self, opname, index_or_series): obj = klass([NaT, datetime(2011, 11, 1)]) assert getattr(obj, arg_op)() == 1 - msg = ( - "The behavior of (DatetimeIndex|Series).argmax/argmin with " - "skipna=False and NAs" - ) - if klass is Series: - msg = "The behavior of Series.(idxmax|idxmin) with all-NA" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = getattr(obj, arg_op)(skipna=False) - if klass is Series: - assert np.isnan(result) - else: - assert result == -1 + with pytest.raises(ValueError, match="Encountered an NA value"): + getattr(obj, arg_op)(skipna=False) obj = klass([NaT, datetime(2011, 11, 1), NaT]) # 
check DatetimeIndex non-monotonic path assert getattr(obj, arg_op)() == 1 - with tm.assert_produces_warning(FutureWarning, match=msg): - result = getattr(obj, arg_op)(skipna=False) - if klass is Series: - assert np.isnan(result) - else: - assert result == -1 + with pytest.raises(ValueError, match="Encountered an NA value"): + getattr(obj, arg_op)(skipna=False) @pytest.mark.parametrize("opname", ["max", "min"]) @pytest.mark.parametrize("dtype", ["M8[ns]", "datetime64[ns, UTC]"]) @@ -175,40 +161,38 @@ def test_argminmax(self): obj = Index([np.nan, 1, np.nan, 2]) assert obj.argmin() == 1 assert obj.argmax() == 3 - msg = "The behavior of Index.argmax/argmin with skipna=False and NAs" - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmin(skipna=False) == -1 - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmax(skipna=False) == -1 + with pytest.raises(ValueError, match="Encountered an NA value"): + obj.argmin(skipna=False) + with pytest.raises(ValueError, match="Encountered an NA value"): + obj.argmax(skipna=False) obj = Index([np.nan]) - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmin() == -1 - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmax() == -1 - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmin(skipna=False) == -1 - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmax(skipna=False) == -1 + with pytest.raises(ValueError, match="Encountered all NA values"): + obj.argmin() + with pytest.raises(ValueError, match="Encountered all NA values"): + obj.argmax() + with pytest.raises(ValueError, match="Encountered all NA values"): + obj.argmin(skipna=False) + with pytest.raises(ValueError, match="Encountered all NA values"): + obj.argmax(skipna=False) - msg = "The behavior of DatetimeIndex.argmax/argmin with skipna=False and NAs" obj = Index([NaT, datetime(2011, 11, 1), datetime(2011, 11, 2), NaT]) assert 
obj.argmin() == 1 assert obj.argmax() == 2 - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmin(skipna=False) == -1 - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmax(skipna=False) == -1 + with pytest.raises(ValueError, match="Encountered an NA value"): + obj.argmin(skipna=False) + with pytest.raises(ValueError, match="Encountered an NA value"): + obj.argmax(skipna=False) obj = Index([NaT]) - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmin() == -1 - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmax() == -1 - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmin(skipna=False) == -1 - with tm.assert_produces_warning(FutureWarning, match=msg): - assert obj.argmax(skipna=False) == -1 + with pytest.raises(ValueError, match="Encountered all NA values"): + obj.argmin() + with pytest.raises(ValueError, match="Encountered all NA values"): + obj.argmax() + with pytest.raises(ValueError, match="Encountered all NA values"): + obj.argmin(skipna=False) + with pytest.raises(ValueError, match="Encountered all NA values"): + obj.argmax(skipna=False) @pytest.mark.parametrize("op, expected_col", [["max", "a"], ["min", "b"]]) def test_same_tz_min_max_axis_1(self, op, expected_col): @@ -841,26 +825,16 @@ def test_idxmin_dt64index(self, unit): # GH#43587 should have NaT instead of NaN dti = DatetimeIndex(["NaT", "2015-02-08", "NaT"]).as_unit(unit) ser = Series([1.0, 2.0, np.nan], index=dti) - msg = "The behavior of Series.idxmin with all-NA values" - with tm.assert_produces_warning(FutureWarning, match=msg): - res = ser.idxmin(skipna=False) - assert res is NaT - msg = "The behavior of Series.idxmax with all-NA values" - with tm.assert_produces_warning(FutureWarning, match=msg): - res = ser.idxmax(skipna=False) - assert res is NaT + with pytest.raises(ValueError, match="Encountered an NA value"): + ser.idxmin(skipna=False) + with 
pytest.raises(ValueError, match="Encountered an NA value"): + ser.idxmax(skipna=False) df = ser.to_frame() - msg = "The behavior of DataFrame.idxmin with all-NA values" - with tm.assert_produces_warning(FutureWarning, match=msg): - res = df.idxmin(skipna=False) - assert res.dtype == f"M8[{unit}]" - assert res.isna().all() - msg = "The behavior of DataFrame.idxmax with all-NA values" - with tm.assert_produces_warning(FutureWarning, match=msg): - res = df.idxmax(skipna=False) - assert res.dtype == f"M8[{unit}]" - assert res.isna().all() + with pytest.raises(ValueError, match="Encountered an NA value"): + df.idxmin(skipna=False) + with pytest.raises(ValueError, match="Encountered an NA value"): + df.idxmax(skipna=False) def test_idxmin(self): # test idxmin @@ -872,9 +846,8 @@ def test_idxmin(self): # skipna or no assert string_series[string_series.idxmin()] == string_series.min() - msg = "The behavior of Series.idxmin" - with tm.assert_produces_warning(FutureWarning, match=msg): - assert isna(string_series.idxmin(skipna=False)) + with pytest.raises(ValueError, match="Encountered an NA value"): + string_series.idxmin(skipna=False) # no NaNs nona = string_series.dropna() @@ -883,8 +856,8 @@ def test_idxmin(self): # all NaNs allna = string_series * np.nan - with tm.assert_produces_warning(FutureWarning, match=msg): - assert isna(allna.idxmin()) + with pytest.raises(ValueError, match="Encountered all NA values"): + allna.idxmin() # datetime64[ns] s = Series(date_range("20130102", periods=6)) @@ -905,8 +878,7 @@ def test_idxmax(self): # skipna or no assert string_series[string_series.idxmax()] == string_series.max() - msg = "The behavior of Series.idxmax with all-NA values" - with tm.assert_produces_warning(FutureWarning, match=msg): + with pytest.raises(ValueError, match="Encountered an NA value"): assert isna(string_series.idxmax(skipna=False)) # no NaNs @@ -916,9 +888,8 @@ def test_idxmax(self): # all NaNs allna = string_series * np.nan - msg = "The behavior of 
Series.idxmax with all-NA values" - with tm.assert_produces_warning(FutureWarning, match=msg): - assert isna(allna.idxmax()) + with pytest.raises(ValueError, match="Encountered all NA values"): + allna.idxmax() s = Series(date_range("20130102", periods=6)) result = s.idxmax() @@ -1175,12 +1146,12 @@ def test_idxminmax_object_dtype(self, using_infer_string): msg = "'>' not supported between instances of 'float' and 'str'" with pytest.raises(TypeError, match=msg): ser3.idxmax() - with pytest.raises(TypeError, match=msg): + with pytest.raises(ValueError, match="Encountered an NA value"): ser3.idxmax(skipna=False) msg = "'<' not supported between instances of 'float' and 'str'" with pytest.raises(TypeError, match=msg): ser3.idxmin() - with pytest.raises(TypeError, match=msg): + with pytest.raises(ValueError, match="Encountered an NA value"): ser3.idxmin(skipna=False) def test_idxminmax_object_frame(self): @@ -1228,14 +1199,12 @@ def test_idxminmax_with_inf(self): s = Series([0, -np.inf, np.inf, np.nan]) assert s.idxmin() == 1 - msg = "The behavior of Series.idxmin with all-NA values" - with tm.assert_produces_warning(FutureWarning, match=msg): - assert np.isnan(s.idxmin(skipna=False)) + with pytest.raises(ValueError, match="Encountered an NA value"): + s.idxmin(skipna=False) assert s.idxmax() == 2 - msg = "The behavior of Series.idxmax with all-NA values" - with tm.assert_produces_warning(FutureWarning, match=msg): - assert np.isnan(s.idxmax(skipna=False)) + with pytest.raises(ValueError, match="Encountered an NA value"): + s.idxmax(skipna=False) def test_sum_uint64(self): # GH 53401 diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index ed125ece349a9..ce41f1e76de79 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -296,6 +296,7 @@ def check_fun_data( self, testfunc, targfunc, + testar, testarval, targarval, skipna, @@ -319,6 +320,13 @@ def check_fun_data( else: targ = bool(targ) + if testfunc.__name__ in ["nanargmax", 
"nanargmin"] and ( + testar.startswith("arr_nan") + or (testar.endswith("nan") and (not skipna or axis == 1)) + ): + with pytest.raises(ValueError, match="Encountered .* NA value"): + testfunc(testarval, axis=axis, skipna=skipna, **kwargs) + return res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) if ( @@ -350,6 +358,7 @@ def check_fun_data( self.check_fun_data( testfunc, targfunc, + testar, testarval2, targarval2, skipna=skipna, @@ -370,6 +379,7 @@ def check_fun( self.check_fun_data( testfunc, targfunc, + testar, testarval, targarval, skipna=skipna,
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Ref: #33941, #51276 This is complicated by #57745 - we still need a proper deprecation for groupby's idxmin/idxmax. For DataFrame with EAs and axis=1, we use groupby's implementation. So I'm leaving that deprecation in place for now, and we can enforce it after groupby's is deprecated and ready to be enforced.
https://api.github.com/repos/pandas-dev/pandas/pulls/57971
2024-03-23T01:14:13Z
2024-03-25T17:55:56Z
2024-03-25T17:55:56Z
2024-03-27T18:05:06Z
Implement hash_join for merges
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py index ce64304731116..a6c6990892d38 100644 --- a/asv_bench/benchmarks/join_merge.py +++ b/asv_bench/benchmarks/join_merge.py @@ -328,6 +328,23 @@ def time_i8merge(self, how): merge(self.left, self.right, how=how) +class UniqueMerge: + params = [4_000_000, 1_000_000] + param_names = ["unique_elements"] + + def setup(self, unique_elements): + N = 1_000_000 + self.left = DataFrame({"a": np.random.randint(1, unique_elements, (N,))}) + self.right = DataFrame({"a": np.random.randint(1, unique_elements, (N,))}) + uniques = self.right.a.drop_duplicates() + self.right["a"] = concat( + [uniques, Series(np.arange(0, -(N - len(uniques)), -1))], ignore_index=True + ) + + def time_unique_merge(self, unique_elements): + merge(self.left, self.right, how="inner") + + class MergeDatetime: params = [ [ diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index f225d384888e3..f748f6e23e003 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -286,6 +286,7 @@ Performance improvements - Performance improvement in :meth:`RangeIndex.join` returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57651`, :issue:`57752`) - Performance improvement in :meth:`RangeIndex.reindex` returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57647`, :issue:`57752`) - Performance improvement in :meth:`RangeIndex.take` returning a :class:`RangeIndex` instead of a :class:`Index` when possible. 
(:issue:`57445`, :issue:`57752`) +- Performance improvement in :func:`merge` if hash-join can be used (:issue:`57970`) - Performance improvement in ``DataFrameGroupBy.__len__`` and ``SeriesGroupBy.__len__`` (:issue:`57595`) - Performance improvement in indexing operations for string dtypes (:issue:`56997`) - Performance improvement in unary methods on a :class:`RangeIndex` returning a :class:`RangeIndex` instead of a :class:`Index` when possible. (:issue:`57825`) diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi index 3725bfa3362d9..7a810a988e50e 100644 --- a/pandas/_libs/hashtable.pyi +++ b/pandas/_libs/hashtable.pyi @@ -16,7 +16,7 @@ def unique_label_indices( class Factorizer: count: int uniques: Any - def __init__(self, size_hint: int) -> None: ... + def __init__(self, size_hint: int, uses_mask: bool = False) -> None: ... def get_count(self) -> int: ... def factorize( self, @@ -25,6 +25,9 @@ class Factorizer: na_value=..., mask=..., ) -> npt.NDArray[np.intp]: ... + def hash_inner_join( + self, values: np.ndarray, mask=... + ) -> tuple[np.ndarray, np.ndarray]: ... class ObjectFactorizer(Factorizer): table: PyObjectHashTable @@ -216,6 +219,9 @@ class HashTable: mask=..., ignore_na: bool = True, ) -> tuple[np.ndarray, npt.NDArray[np.intp]]: ... # np.ndarray[subclass-specific] + def hash_inner_join( + self, values: np.ndarray, mask=... + ) -> tuple[np.ndarray, np.ndarray]: ... class Complex128HashTable(HashTable): ... class Complex64HashTable(HashTable): ... 
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 070533ba999c7..97fae1d6480ce 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -70,7 +70,7 @@ cdef class Factorizer: cdef readonly: Py_ssize_t count - def __cinit__(self, size_hint: int): + def __cinit__(self, size_hint: int, uses_mask: bool = False): self.count = 0 def get_count(self) -> int: @@ -79,13 +79,16 @@ cdef class Factorizer: def factorize(self, values, na_sentinel=-1, na_value=None, mask=None) -> np.ndarray: raise NotImplementedError + def hash_inner_join(self, values, mask=None): + raise NotImplementedError + cdef class ObjectFactorizer(Factorizer): cdef public: PyObjectHashTable table ObjectVector uniques - def __cinit__(self, size_hint: int): + def __cinit__(self, size_hint: int, uses_mask: bool = False): self.table = PyObjectHashTable(size_hint) self.uniques = ObjectVector() diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index f9abd574dae01..e3a9102fec395 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -557,6 +557,49 @@ cdef class {{name}}HashTable(HashTable): self.table.vals[k] = i self.na_position = na_position + @cython.wraparound(False) + @cython.boundscheck(False) + def hash_inner_join(self, const {{dtype}}_t[:] values, const uint8_t[:] mask = None) -> tuple[ndarray, ndarray]: + cdef: + Py_ssize_t i, n = len(values) + {{c_type}} val + khiter_t k + Int64Vector locs = Int64Vector() + Int64Vector self_locs = Int64Vector() + Int64VectorData *l + Int64VectorData *sl + int8_t na_position = self.na_position + + l = &locs.data + sl = &self_locs.data + + if self.uses_mask and mask is None: + raise NotImplementedError # pragma: no cover + + with nogil: + for i in range(n): + if self.uses_mask and mask[i]: + if self.na_position == -1: + continue + if needs_resize(l.size, l.capacity): + with gil: + locs.resize(locs.data.capacity * 4) + 
self_locs.resize(locs.data.capacity * 4) + append_data_int64(l, i) + append_data_int64(sl, na_position) + else: + val = {{to_c_type}}(values[i]) + k = kh_get_{{dtype}}(self.table, val) + if k != self.table.n_buckets: + if needs_resize(l.size, l.capacity): + with gil: + locs.resize(locs.data.capacity * 4) + self_locs.resize(locs.data.capacity * 4) + append_data_int64(l, i) + append_data_int64(sl, self.table.vals[k]) + + return self_locs.to_array(), locs.to_array() + @cython.boundscheck(False) def lookup(self, const {{dtype}}_t[:] values, const uint8_t[:] mask = None) -> ndarray: # -> np.ndarray[np.intp] @@ -879,8 +922,8 @@ cdef class {{name}}Factorizer(Factorizer): {{name}}HashTable table {{name}}Vector uniques - def __cinit__(self, size_hint: int): - self.table = {{name}}HashTable(size_hint) + def __cinit__(self, size_hint: int, uses_mask: bool = False): + self.table = {{name}}HashTable(size_hint, uses_mask=uses_mask) self.uniques = {{name}}Vector() def factorize(self, const {{c_type}}[:] values, @@ -911,6 +954,9 @@ cdef class {{name}}Factorizer(Factorizer): self.count = len(self.uniques) return labels + def hash_inner_join(self, const {{c_type}}[:] values, const uint8_t[:] mask = None) -> tuple[np.ndarray, np.ndarray]: + return self.table.hash_inner_join(values, mask) + {{endfor}} diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 8ea2ac24e13c8..2cd065d03ff53 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1780,7 +1780,10 @@ def get_join_indexers_non_unique( np.ndarray[np.intp] Indexer into right. 
""" - lkey, rkey, count = _factorize_keys(left, right, sort=sort) + lkey, rkey, count = _factorize_keys(left, right, sort=sort, how=how) + if count == -1: + # hash join + return lkey, rkey if how == "left": lidx, ridx = libjoin.left_outer_join(lkey, rkey, count, sort=sort) elif how == "right": @@ -2385,7 +2388,10 @@ def _left_join_on_index( def _factorize_keys( - lk: ArrayLike, rk: ArrayLike, sort: bool = True + lk: ArrayLike, + rk: ArrayLike, + sort: bool = True, + how: str | None = None, ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: """ Encode left and right keys as enumerated types. @@ -2401,6 +2407,9 @@ def _factorize_keys( sort : bool, defaults to True If True, the encoding is done such that the unique elements in the keys are sorted. + how: str, optional + Used to determine if we can use hash-join. If not given, then just factorize + keys. Returns ------- @@ -2409,7 +2418,8 @@ def _factorize_keys( np.ndarray[np.intp] Right (resp. left if called with `key='right'`) labels, as enumerated type. int - Number of unique elements in union of left and right labels. + Number of unique elements in union of left and right labels. -1 if we used + a hash-join. 
See Also -------- @@ -2527,28 +2537,41 @@ def _factorize_keys( klass, lk, rk = _convert_arrays_and_get_rizer_klass(lk, rk) - rizer = klass(max(len(lk), len(rk))) + rizer = klass( + max(len(lk), len(rk)), + uses_mask=isinstance(rk, (BaseMaskedArray, ArrowExtensionArray)), + ) if isinstance(lk, BaseMaskedArray): assert isinstance(rk, BaseMaskedArray) - llab = rizer.factorize(lk._data, mask=lk._mask) - rlab = rizer.factorize(rk._data, mask=rk._mask) + lk_data, lk_mask = lk._data, lk._mask + rk_data, rk_mask = rk._data, rk._mask elif isinstance(lk, ArrowExtensionArray): assert isinstance(rk, ArrowExtensionArray) # we can only get here with numeric dtypes # TODO: Remove when we have a Factorizer for Arrow - llab = rizer.factorize( - lk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype), mask=lk.isna() - ) - rlab = rizer.factorize( - rk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype), mask=rk.isna() - ) + lk_data = lk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype) + rk_data = rk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype) + lk_mask, rk_mask = lk.isna(), rk.isna() else: # Argument 1 to "factorize" of "ObjectFactorizer" has incompatible type # "Union[ndarray[Any, dtype[signedinteger[_64Bit]]], # ndarray[Any, dtype[object_]]]"; expected "ndarray[Any, dtype[object_]]" - llab = rizer.factorize(lk) # type: ignore[arg-type] - rlab = rizer.factorize(rk) # type: ignore[arg-type] + lk_data, rk_data = lk, rk # type: ignore[assignment] + lk_mask, rk_mask = None, None + + hash_join_available = how == "inner" and not sort and lk.dtype.kind in "iufb" + if hash_join_available: + rlab = rizer.factorize(rk_data, mask=rk_mask) + if rizer.get_count() == len(rlab): + ridx, lidx = rizer.hash_inner_join(lk_data, lk_mask) + return lidx, ridx, -1 + else: + llab = rizer.factorize(lk_data, mask=lk_mask) + else: + llab = rizer.factorize(lk_data, mask=lk_mask) + rlab = rizer.factorize(rk_data, mask=rk_mask) + assert llab.dtype == np.dtype(np.intp), llab.dtype assert rlab.dtype == 
np.dtype(np.intp), rlab.dtype diff --git a/scripts/run_stubtest.py b/scripts/run_stubtest.py index 6307afa1bc822..df88c61061f12 100644 --- a/scripts/run_stubtest.py +++ b/scripts/run_stubtest.py @@ -44,6 +44,7 @@ "pandas._libs.hashtable.HashTable.set_na", "pandas._libs.hashtable.HashTable.sizeof", "pandas._libs.hashtable.HashTable.unique", + "pandas._libs.hashtable.HashTable.hash_inner_join", # stubtest might be too sensitive "pandas._libs.lib.NoDefault", "pandas._libs.lib._NoDefault.no_default",
cc @mroeschke Our abstraction in merges is bad, this makes it a little worse unfortunately. But it enables a potentially huge performance improvement for joins that could be hash joins. I am using "right" to make the decision, because left determines the result order, which means that we would have to sort after we are finished which gives the performance improvement back. using right makes this problem go away. We get time complexity O(m+n) here over O(m*n) with a non-trivial factor as before This makes adding semi joins pretty easy as well, which is nice next to the performance improvements here. ``` | Change | Before [38086f11] <backtest> | After [3b6b787e] <to> | Ratio | Benchmark (Parameter) | |----------|--------------------------------|-------------------------|---------|-----------------------------------------------------------------------------| | - | 193±4ms | 165±5ms | 0.85 | join_merge.I8Merge.time_i8merge('inner') | | - | 7.76±0.2ms | 6.53±0.06ms | 0.84 | join_merge.Merge.time_merge_2intkey(False) | | - | 1.03±0.02ms | 834±4μs | 0.81 | join_merge.Merge.time_merge_dataframe_integer_2key(False) | | - | 485±2μs | 339±5μs | 0.7 | join_merge.Merge.time_merge_dataframe_integer_key(False) | | - | 3.29±0.2ms | 2.29±0.07ms | 0.7 | join_merge.MergeDatetime.time_merge(('ms', 'ms'), 'Europe/Brussels', False) | | - | 3.31±0.07ms | 2.27±0.07ms | 0.69 | join_merge.MergeDatetime.time_merge(('ms', 'ms'), None, False) | | - | 2.79±0.09ms | 1.77±0.01ms | 0.63 | join_merge.MergeDatetime.time_merge(('ns', 'ms'), 'Europe/Brussels', False) | | - | 2.89±0.04ms | 1.78±0.05ms | 0.62 | join_merge.MergeDatetime.time_merge(('ns', 'ms'), None, False) | | - | 2.57±0.09ms | 1.56±0.03ms | 0.61 | join_merge.MergeDatetime.time_merge(('ns', 'ns'), None, False) | | - | 1.97±0.05ms | 1.18±0.02ms | 0.6 | join_merge.MergeEA.time_merge('Float32', False) | | - | 1.84±0.02ms | 1.10±0.03ms | 0.6 | join_merge.MergeEA.time_merge('UInt16', False) | | - | 2.09±0.04ms | 1.24±0.01ms | 0.59 | 
join_merge.MergeEA.time_merge('UInt64', False) | | - | 2.10±0.09ms | 1.22±0.01ms | 0.58 | join_merge.MergeEA.time_merge('Float64', False) | | - | 2.09±0.08ms | 1.22±0.01ms | 0.58 | join_merge.MergeEA.time_merge('UInt32', False) | | - | 2.70±0.1ms | 1.54±0.02ms | 0.57 | join_merge.MergeDatetime.time_merge(('ns', 'ns'), 'Europe/Brussels', False) | | - | 1.72±0.02ms | 971±20μs | 0.57 | join_merge.MergeEA.time_merge('Int16', False) | | - | 1.76±0.03ms | 973±10μs | 0.55 | join_merge.MergeEA.time_merge('Int32', False) | | - | 1.94±0.09ms | 1.07±0.03ms | 0.55 | join_merge.MergeEA.time_merge('Int64', False) | | - | 57.0±2ms | 21.7±0.3ms | 0.38 | join_merge.UniqueMerge.time_unique_merge(1000000) | | - | 106±7ms | 34.1±0.3ms | 0.32 | join_merge.UniqueMerge.time_unique_merge(4000000) | ```
https://api.github.com/repos/pandas-dev/pandas/pulls/57970
2024-03-22T22:27:29Z
2024-03-24T00:41:11Z
2024-03-24T00:41:11Z
2024-03-24T00:41:14Z
WEB: Updating active/inactive core devs
diff --git a/web/pandas/config.yml b/web/pandas/config.yml index 05fdea13cab43..74e7fda2e7983 100644 --- a/web/pandas/config.yml +++ b/web/pandas/config.yml @@ -72,11 +72,9 @@ blog: - https://phofl.github.io/feeds/pandas.atom.xml maintainers: active: - - wesm - jorisvandenbossche - TomAugspurger - jreback - - gfyoung - WillAyd - mroeschke - jbrockmendel @@ -93,7 +91,6 @@ maintainers: - fangchenli - twoertwein - lithomas1 - - mzeitlin11 - lukemanley - noatamir inactive: @@ -108,6 +105,9 @@ maintainers: - jschendel - charlesdong1991 - dsaxton + - wesm + - gfyoung + - mzeitlin11 workgroups: coc: name: Code of Conduct @@ -121,13 +121,12 @@ workgroups: finance: name: Finance contact: finance@pandas.pydata.org - responsibilities: "Approve the project expenses." + responsibilities: "Manage the funding. Coordinate the request of grants. Approve the project expenses." members: - - Wes McKinney + - Matthew Roeschke - Jeff Reback - Joris Van den Bossche - - Tom Augspurger - - Matthew Roeschke + - Patrick Hoefler infrastructure: name: Infrastructure contact: infrastructure@pandas.pydata.org
I've checked with the devs who weren't active in pandas recently to see if they wished to become inactive, and few were happy to become inactive. Besides to manage expectations of other core devs and the community, this is relevant for the decision making proposed in [PDEP-1](https://github.com/pandas-dev/pandas/pull/53576/files). With the current proposal, the number of maintainers to have a quorum is lowered from 12 to 11 after this PR, and it'll have an impact if more people become a maintainer. @bashtage I couldn't get an answer from you via email (I sent two emails). If you'd like to remain active that's totally fine, just let me know.
https://api.github.com/repos/pandas-dev/pandas/pulls/57969
2024-03-22T22:02:25Z
2024-03-26T17:28:26Z
2024-03-26T17:28:26Z
2024-03-26T17:28:33Z
BUG: #57954 encoding ignored for filelike
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index b234a6b78e051..7ecd8cd6d5012 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -1310,6 +1310,16 @@ def _check_file_or_buffer(self, f, engine: CSVEngine) -> None: raise ValueError( "The 'python' engine cannot iterate through this file buffer." ) + if hasattr(f, "encoding"): + file_encoding = f.encoding + orig_reader_enc = self.orig_options.get("encoding", None) + any_none = file_encoding is None or orig_reader_enc is None + if file_encoding != orig_reader_enc and not any_none: + file_path = getattr(f, "name", None) + raise ValueError( + f"The specified reader encoding {orig_reader_enc} is different " + f"from the encoding {file_encoding} of file {file_path}." + ) def _clean_options( self, options: dict[str, Any], engine: CSVEngine @@ -1485,6 +1495,7 @@ def _make_engine( "pyarrow": ArrowParserWrapper, "python-fwf": FixedWidthFieldParser, } + if engine not in mapping: raise ValueError( f"Unknown engine: {engine} (valid options are {mapping.keys()})" diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index 090235c862a2a..98a460f221592 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -511,7 +511,7 @@ def __next__(self): def test_buffer_rd_bytes_bad_unicode(c_parser_only): # see gh-22748 t = BytesIO(b"\xb0") - t = TextIOWrapper(t, encoding="ascii", errors="surrogateescape") + t = TextIOWrapper(t, encoding="UTF-8", errors="surrogateescape") msg = "'utf-8' codec can't encode character" with pytest.raises(UnicodeError, match=msg): c_parser_only.read_csv(t, encoding="UTF-8") diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index 6aeed2377a3aa..eeb783f1957b7 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -48,6 +48,13 @@ def test_StringIO(self, 
csv_path): reader = TextReader(src, header=None) reader.read() + def test_encoding_mismatch_warning(self, csv_path): + # GH-57954 + with open(csv_path, encoding="UTF-8") as f: + msg = "latin1 is different from the encoding" + with pytest.raises(ValueError, match=msg): + read_csv(f, encoding="latin1") + def test_string_factorize(self): # should this be optional? data = "a\nb\na\nb\na"
- [x] closes #57954 - [x] [Tests added and passed if fixing a bug or adding a new feature - [x] All [code checks passed]
https://api.github.com/repos/pandas-dev/pandas/pulls/57968
2024-03-22T20:58:01Z
2024-03-28T18:10:56Z
2024-03-28T18:10:56Z
2024-03-28T23:30:31Z
CLN: Enforce verbose parameter deprecation in read_csv/read_table
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index ef561d50066d1..741591be25bf9 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -256,6 +256,7 @@ Removal of prior version deprecations/changes - Removed unused arguments ``*args`` and ``**kwargs`` in :class:`Resampler` methods (:issue:`50977`) - Unrecognized timezones when parsing strings to datetimes now raises a ``ValueError`` (:issue:`51477`) - Removed the :class:`Grouper` attributes ``ax``, ``groups``, ``indexer``, and ``obj`` (:issue:`51206`, :issue:`51182`) +- Removed deprecated keyword ``verbose`` on :func:`read_csv` and :func:`read_table` (:issue:`56556`) - Removed the attribute ``dtypes`` from :class:`.DataFrameGroupBy` (:issue:`51997`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 01c7de0c6f2b3..c29cdbcf5975e 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -6,7 +6,6 @@ from csv import ( QUOTE_NONE, QUOTE_NONNUMERIC, ) -import time import warnings from pandas.util._exceptions import find_stack_level @@ -344,10 +343,9 @@ cdef class TextReader: object true_values, false_values object handle object orig_header - bint na_filter, keep_default_na, verbose, has_usecols, has_mi_columns + bint na_filter, keep_default_na, has_usecols, has_mi_columns bint allow_leading_cols uint64_t parser_start # this is modified after __init__ - list clocks const char *encoding_errors kh_str_starts_t *false_set kh_str_starts_t *true_set @@ -400,7 +398,6 @@ cdef class TextReader: bint allow_leading_cols=True, skiprows=None, skipfooter=0, # int64_t - bint verbose=False, float_precision=None, bint skip_blank_lines=True, encoding_errors=b"strict", @@ -417,9 +414,6 @@ cdef class TextReader: self.parser = parser_new() self.parser.chunksize = tokenize_chunksize - # For timekeeping - self.clocks = [] - self.parser.usecols = (usecols is not None) 
self._setup_parser_source(source) @@ -507,8 +501,6 @@ cdef class TextReader: self.converters = converters self.na_filter = na_filter - self.verbose = verbose - if float_precision == "round_trip": # see gh-15140 self.parser.double_converter = round_trip_wrapper @@ -896,8 +888,6 @@ cdef class TextReader: int64_t buffered_lines int64_t irows - self._start_clock() - if rows is not None: irows = rows buffered_lines = self.parser.lines - self.parser_start @@ -915,12 +905,8 @@ cdef class TextReader: if self.parser_start >= self.parser.lines: raise StopIteration - self._end_clock("Tokenization") - self._start_clock() columns = self._convert_column_data(rows) - self._end_clock("Type conversion") - self._start_clock() if len(columns) > 0: rows_read = len(list(columns.values())[0]) # trim @@ -929,18 +915,8 @@ cdef class TextReader: parser_trim_buffers(self.parser) self.parser_start -= rows_read - self._end_clock("Parser memory cleanup") - return columns - cdef _start_clock(self): - self.clocks.append(time.time()) - - cdef _end_clock(self, str what): - if self.verbose: - elapsed = time.time() - self.clocks.pop(-1) - print(f"{what} took: {elapsed * 1000:.2f} ms") - def set_noconvert(self, i: int) -> None: self.noconvert.add(i) diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 7b06c6b6b0d39..3bbb7c83345e5 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -519,7 +519,6 @@ def _convert_to_ndarrays( dct: Mapping, na_values, na_fvalues, - verbose: bool = False, converters=None, dtypes=None, ) -> dict[Any, np.ndarray]: @@ -596,8 +595,6 @@ def _convert_to_ndarrays( cvals = self._cast_types(cvals, cast_type, c) result[c] = cvals - if verbose and na_count: - print(f"Filled {na_count} NA values in column {c!s}") return result @final @@ -1236,7 +1233,6 @@ def converter(*date_cols, col: Hashable): "usecols": None, # 'iterator': False, "chunksize": None, - "verbose": False, "encoding": None, "compression": None, 
"skip_blank_lines": True, diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index dbda47172f6ac..44210b6979827 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -110,8 +110,6 @@ def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None: if "has_index_names" in kwds: self.has_index_names = kwds["has_index_names"] - self.verbose = kwds["verbose"] - self.thousands = kwds["thousands"] self.decimal = kwds["decimal"] @@ -372,7 +370,6 @@ def _convert_data( data, clean_na_values, clean_na_fvalues, - self.verbose, clean_conv, clean_dtypes, ) diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 9f2f208d8c350..b234a6b78e051 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -116,7 +116,6 @@ class _read_shared(TypedDict, Generic[HashableT], total=False): ) keep_default_na: bool na_filter: bool - verbose: bool | lib.NoDefault skip_blank_lines: bool parse_dates: bool | Sequence[Hashable] | None infer_datetime_format: bool | lib.NoDefault @@ -295,10 +294,6 @@ class _read_shared(TypedDict, Generic[HashableT], total=False): Detect missing value markers (empty strings and the value of ``na_values``). In data without any ``NA`` values, passing ``na_filter=False`` can improve the performance of reading a large file. -verbose : bool, default False - Indicate number of ``NA`` values placed in non-numeric columns. - - .. deprecated:: 2.2.0 skip_blank_lines : bool, default True If ``True``, skip over blank lines rather than interpreting as ``NaN`` values. 
parse_dates : bool, None, list of Hashable, list of lists or dict of {{Hashable : \ @@ -556,7 +551,6 @@ class _Fwf_Defaults(TypedDict): "converters", "iterator", "dayfirst", - "verbose", "skipinitialspace", "low_memory", } @@ -755,7 +749,6 @@ def read_csv( | None = None, keep_default_na: bool = True, na_filter: bool = True, - verbose: bool | lib.NoDefault = lib.no_default, skip_blank_lines: bool = True, # Datetime Handling parse_dates: bool | Sequence[Hashable] | None = None, @@ -845,17 +838,6 @@ def read_csv( else: delim_whitespace = False - if verbose is not lib.no_default: - # GH#55569 - warnings.warn( - "The 'verbose' keyword in pd.read_csv is deprecated and " - "will be removed in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) - else: - verbose = False - # locals() should never be modified kwds = locals().copy() del kwds["filepath_or_buffer"] @@ -958,7 +940,6 @@ def read_table( | None = None, keep_default_na: bool = True, na_filter: bool = True, - verbose: bool | lib.NoDefault = lib.no_default, skip_blank_lines: bool = True, # Datetime Handling parse_dates: bool | Sequence[Hashable] | None = None, @@ -1039,17 +1020,6 @@ def read_table( else: delim_whitespace = False - if verbose is not lib.no_default: - # GH#55569 - warnings.warn( - "The 'verbose' keyword in pd.read_table is deprecated and " - "will be removed in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) - else: - verbose = False - # locals() should never be modified kwds = locals().copy() del kwds["filepath_or_buffer"] diff --git a/pandas/tests/io/parser/common/test_verbose.py b/pandas/tests/io/parser/common/test_verbose.py deleted file mode 100644 index c5490afba1e04..0000000000000 --- a/pandas/tests/io/parser/common/test_verbose.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -Tests that work on both the Python and C engines but do not have a -specific classification into the other test modules. 
-""" - -from io import StringIO - -import pytest - -import pandas._testing as tm - -depr_msg = "The 'verbose' keyword in pd.read_csv is deprecated" - - -def test_verbose_read(all_parsers, capsys): - parser = all_parsers - data = """a,b,c,d -one,1,2,3 -one,1,2,3 -,1,2,3 -one,1,2,3 -,1,2,3 -,1,2,3 -one,1,2,3 -two,1,2,3""" - - if parser.engine == "pyarrow": - msg = "The 'verbose' option is not supported with the 'pyarrow' engine" - with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning( - FutureWarning, match=depr_msg, check_stacklevel=False - ): - parser.read_csv(StringIO(data), verbose=True) - return - - # Engines are verbose in different ways. - with tm.assert_produces_warning( - FutureWarning, match=depr_msg, check_stacklevel=False - ): - parser.read_csv(StringIO(data), verbose=True) - captured = capsys.readouterr() - - if parser.engine == "c": - assert "Tokenization took:" in captured.out - assert "Parser memory cleanup took:" in captured.out - else: # Python engine - assert captured.out == "Filled 3 NA values in column a\n" - - -def test_verbose_read2(all_parsers, capsys): - parser = all_parsers - data = """a,b,c,d -one,1,2,3 -two,1,2,3 -three,1,2,3 -four,1,2,3 -five,1,2,3 -,1,2,3 -seven,1,2,3 -eight,1,2,3""" - - if parser.engine == "pyarrow": - msg = "The 'verbose' option is not supported with the 'pyarrow' engine" - with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning( - FutureWarning, match=depr_msg, check_stacklevel=False - ): - parser.read_csv(StringIO(data), verbose=True, index_col=0) - return - - with tm.assert_produces_warning( - FutureWarning, match=depr_msg, check_stacklevel=False - ): - parser.read_csv(StringIO(data), verbose=True, index_col=0) - captured = capsys.readouterr() - - # Engines are verbose in different ways. 
- if parser.engine == "c": - assert "Tokenization took:" in captured.out - assert "Parser memory cleanup took:" in captured.out - else: # Python engine - assert captured.out == "Filled 1 NA values in column a\n"
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57966
2024-03-22T17:47:06Z
2024-03-22T19:07:49Z
2024-03-22T19:07:49Z
2024-04-07T21:49:27Z
BUG: Fix na_values dict not working on index column (#57547)
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index ef561d50066d1..bce5c7927c72d 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -301,6 +301,7 @@ Bug fixes - Fixed bug in :meth:`Series.diff` allowing non-integer values for the ``periods`` argument. (:issue:`56607`) - Fixed bug in :meth:`Series.rank` that doesn't preserve missing values for nullable integers when ``na_option='keep'``. (:issue:`56976`) - Fixed bug in :meth:`Series.replace` and :meth:`DataFrame.replace` inconsistently replacing matching instances when ``regex=True`` and missing values are present. (:issue:`56599`) +- Fixed bug in :meth:`read_csv` raising ``TypeError`` when ``index_col`` is specified and ``na_values`` is a dict containing the key ``None``. (:issue:`57547`) Categorical ^^^^^^^^^^^ diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 7b06c6b6b0d39..bb9f1db0d05e8 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -487,6 +487,8 @@ def _agg_index(self, index, try_parse_dates: bool = True) -> Index: col_na_values, col_na_fvalues = _get_na_values( col_name, self.na_values, self.na_fvalues, self.keep_default_na ) + else: + col_na_values, col_na_fvalues = set(), set() clean_dtypes = self._clean_mapping(self.dtype) diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index dbda47172f6ac..21dcf5f2f9310 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -356,14 +356,15 @@ def _convert_data( if isinstance(self.na_values, dict): for col in self.na_values: - na_value = self.na_values[col] - na_fvalue = self.na_fvalues[col] + if col is not None: + na_value = self.na_values[col] + na_fvalue = self.na_fvalues[col] - if isinstance(col, int) and col not in self.orig_names: - col = self.orig_names[col] + if isinstance(col, int) and col not in self.orig_names: + col = self.orig_names[col] - 
clean_na_values[col] = na_value - clean_na_fvalues[col] = na_fvalue + clean_na_values[col] = na_value + clean_na_fvalues[col] = na_fvalue else: clean_na_values = self.na_values clean_na_fvalues = self.na_fvalues diff --git a/pandas/tests/io/parser/test_na_values.py b/pandas/tests/io/parser/test_na_values.py index ba0e3033321e4..1e370f649aef8 100644 --- a/pandas/tests/io/parser/test_na_values.py +++ b/pandas/tests/io/parser/test_na_values.py @@ -532,6 +532,47 @@ def test_na_values_dict_aliasing(all_parsers): tm.assert_dict_equal(na_values, na_values_copy) +def test_na_values_dict_null_column_name(all_parsers): + # see gh-57547 + parser = all_parsers + data = ",x,y\n\nMA,1,2\nNA,2,1\nOA,,3" + names = [None, "x", "y"] + na_values = {name: STR_NA_VALUES for name in names} + dtype = {None: "object", "x": "float64", "y": "float64"} + + if parser.engine == "pyarrow": + msg = "The pyarrow engine doesn't support passing a dict for na_values" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), + index_col=0, + header=0, + dtype=dtype, + names=names, + na_values=na_values, + keep_default_na=False, + ) + return + + expected = DataFrame( + {None: ["MA", "NA", "OA"], "x": [1.0, 2.0, np.nan], "y": [2.0, 1.0, 3.0]} + ) + + expected = expected.set_index(None) + + result = parser.read_csv( + StringIO(data), + index_col=0, + header=0, + dtype=dtype, + names=names, + na_values=na_values, + keep_default_na=False, + ) + + tm.assert_frame_equal(result, expected) + + def test_na_values_dict_col_index(all_parsers): # see gh-14203 data = "a\nfoo\n1"
- [x] closes #57547 - [x] [Tests added and passed] - [x] All [code checks passed] - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v3.0.0.rst` file if fixing a bug or adding a new feature. In the read_csv method, pandas allows having na_values set as a dict, which in such case lets you decide which values are null for each column. In the occurence of one of the columns being `None`, no null values are applied to the column and it remains as it was. This specific case is what is being tested in the issue #57547. The problem was that in these particular conditions variables `col_na_values` and `col_na_fvalues` were not being set correctly causing a `TypeError`. All i had to do was correctly define these variables as empty sets in an `else` block. On the python engine this same logic was not yet programmed. I implemented it, by adding an if statement, ensuring na_values are only applied if the column is not `None`.
https://api.github.com/repos/pandas-dev/pandas/pulls/57965
2024-03-22T14:51:57Z
2024-04-09T17:08:34Z
2024-04-09T17:08:34Z
2024-04-09T22:44:43Z
DOC: fix closing sq. bracket in pandas.read_fwf example (#57959)
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 1ef2e65617c9b..9f2f208d8c350 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -1139,7 +1139,7 @@ def read_fwf( ``file://localhost/path/to/table.csv``. colspecs : list of tuple (int, int) or 'infer'. optional A list of tuples giving the extents of the fixed-width - fields of each line as half-open intervals (i.e., [from, to[ ). + fields of each line as half-open intervals (i.e., [from, to] ). String value 'infer' can be used to instruct the parser to try detecting the column specifications from the first 100 rows of the data which are not being skipped via skiprows (default='infer').
- change closing square bracket in colspecs description to correct "]" - [x] closes #57959 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57961
2024-03-22T05:07:58Z
2024-03-22T14:33:38Z
2024-03-22T14:33:38Z
2024-03-22T14:33:38Z
BUG: Groupby median on timedelta column with NaT returns odd value (#…
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index f748f6e23e003..3964745e2e657 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -297,6 +297,7 @@ Performance improvements Bug fixes ~~~~~~~~~ - Fixed bug in :class:`SparseDtype` for equal comparison with na fill value. (:issue:`54770`) +- Fixed bug in :meth:`.DataFrameGroupBy.median` where nat values gave an incorrect result. (:issue:`57926`) - Fixed bug in :meth:`DataFrame.join` inconsistently setting result index name (:issue:`55815`) - Fixed bug in :meth:`DataFrame.to_string` that raised ``StopIteration`` with nested DataFrames. (:issue:`16098`) - Fixed bug in :meth:`DataFrame.update` bool dtype being converted to object (:issue:`55509`) diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi index 95ac555303221..53f5f73624232 100644 --- a/pandas/_libs/groupby.pyi +++ b/pandas/_libs/groupby.pyi @@ -12,6 +12,7 @@ def group_median_float64( min_count: int = ..., # Py_ssize_t mask: np.ndarray | None = ..., result_mask: np.ndarray | None = ..., + is_datetimelike: bool = ..., # bint ) -> None: ... 
def group_cumprod( out: np.ndarray, # float64_t[:, ::1] diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 2ff45038d6a3e..c0b9ed42cb535 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -101,7 +101,11 @@ cdef float64_t median_linear_mask(float64_t* a, int n, uint8_t* mask) noexcept n return result -cdef float64_t median_linear(float64_t* a, int n) noexcept nogil: +cdef float64_t median_linear( + float64_t* a, + int n, + bint is_datetimelike=False +) noexcept nogil: cdef: int i, j, na_count = 0 float64_t* tmp @@ -111,9 +115,14 @@ cdef float64_t median_linear(float64_t* a, int n) noexcept nogil: return NaN # count NAs - for i in range(n): - if a[i] != a[i]: - na_count += 1 + if is_datetimelike: + for i in range(n): + if a[i] == NPY_NAT: + na_count += 1 + else: + for i in range(n): + if a[i] != a[i]: + na_count += 1 if na_count: if na_count == n: @@ -124,10 +133,16 @@ cdef float64_t median_linear(float64_t* a, int n) noexcept nogil: raise MemoryError() j = 0 - for i in range(n): - if a[i] == a[i]: - tmp[j] = a[i] - j += 1 + if is_datetimelike: + for i in range(n): + if a[i] != NPY_NAT: + tmp[j] = a[i] + j += 1 + else: + for i in range(n): + if a[i] == a[i]: + tmp[j] = a[i] + j += 1 a = tmp n -= na_count @@ -170,6 +185,7 @@ def group_median_float64( Py_ssize_t min_count=-1, const uint8_t[:, :] mask=None, uint8_t[:, ::1] result_mask=None, + bint is_datetimelike=False, ) -> None: """ Only aggregates on axis=0 @@ -228,7 +244,7 @@ def group_median_float64( ptr += _counts[0] for j in range(ngroups): size = _counts[j + 1] - out[j, i] = median_linear(ptr, size) + out[j, i] = median_linear(ptr, size, is_datetimelike) ptr += size diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index acf4c7bebf52d..8585ae3828247 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -415,6 +415,7 @@ def _call_cython_op( "last", "first", "sum", + "median", ]: func( out=result, @@ -427,7 +428,7 @@ def 
_call_cython_op( is_datetimelike=is_datetimelike, **kwargs, ) - elif self.how in ["sem", "std", "var", "ohlc", "prod", "median"]: + elif self.how in ["sem", "std", "var", "ohlc", "prod"]: if self.how in ["std", "sem"]: kwargs["is_datetimelike"] = is_datetimelike func( diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 00e781e6a7f07..7ec1598abf403 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -145,6 +145,15 @@ def test_len_nan_group(): assert len(df.groupby(["a", "b"])) == 0 +def test_groupby_timedelta_median(): + # issue 57926 + expected = Series(data=Timedelta("1d"), index=["foo"]) + df = DataFrame({"label": ["foo", "foo"], "timedelta": [pd.NaT, Timedelta("1d")]}) + gb = df.groupby("label")["timedelta"] + actual = gb.median() + tm.assert_series_equal(actual, expected, check_names=False) + + @pytest.mark.parametrize("keys", [["a"], ["a", "b"]]) def test_len_categorical(dropna, observed, keys): # GH#57595
…57926) Handle NaT correctly in group_median_float64 - [x] closes #57926 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57957
2024-03-21T23:36:07Z
2024-03-26T20:57:52Z
2024-03-26T20:57:52Z
2024-04-10T12:09:36Z
Study on refactoring the series constructor
diff --git a/pandas/core/series.py b/pandas/core/series.py index 3adc2d2a44e73..407f2d48a5cb6 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -9,6 +9,7 @@ Iterable, Mapping, Sequence, + Sized, ) import operator import sys @@ -363,72 +364,114 @@ def __init__( copy: bool | None = None, ) -> None: allow_mgr = False - if ( - isinstance(data, SingleBlockManager) - and index is None - and dtype is None - and (copy is False or copy is None) - ): - if not allow_mgr: - # GH#52419 - warnings.warn( - f"Passing a {type(data).__name__} to {type(self).__name__} " - "is deprecated and will raise in a future version. " - "Use public APIs instead.", - DeprecationWarning, - stacklevel=2, - ) - data = data.copy(deep=False) - # GH#33357 called with just the SingleBlockManager - NDFrame.__init__(self, data) - self.name = name - return + deep = True # deep copy - is_pandas_object = isinstance(data, (Series, Index, ExtensionArray)) - data_dtype = getattr(data, "dtype", None) - original_dtype = dtype + # Series TASK 1: VALIDATE BASIC TYPES. + if dtype is not None: + dtype = self._validate_dtype(dtype) - if isinstance(data, (ExtensionArray, np.ndarray)): - if copy is not False: - if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): - data = data.copy() - if copy is None: - copy = False + copy_arrays = copy is True or copy is None # Arrays and ExtendedArrays + copy = copy is True # Series and Manager - if isinstance(data, SingleBlockManager) and not copy: - data = data.copy(deep=False) + # Series TASK 2: RAISE ERRORS ON KNOWN UNACEPPTED CASES. + if isinstance(data, MultiIndex): + raise NotImplementedError( + "initializing a Series from a MultiIndex is not supported" + ) - if not allow_mgr: - warnings.warn( - f"Passing a {type(data).__name__} to {type(self).__name__} " - "is deprecated and will raise in a future version. 
" - "Use public APIs instead.", - DeprecationWarning, - stacklevel=2, + if isinstance(data, SingleBlockManager): + if not (data.index.equals(index) or index is None) or copy: + # GH #19275 SingleBlockManager input should only be called internally + raise AssertionError( + "Cannot pass both SingleBlockManager " + "`data` argument and a different " + "`index` argument. `copy` must be False." + ) + + if isinstance(data, np.ndarray): + if len(data.dtype): + # GH#13296 we are dealing with a compound dtype, + # which should be treated as 2D. + raise ValueError( + "Cannot construct a Series from an ndarray with " + "compound dtype. Use DataFrame instead." ) + # Series TASK 3: CAPTURE INPUT SIGNATURE. + is_array = isinstance(data, (np.ndarray, ExtensionArray)) + is_pandas_object = isinstance(data, (Series, Index, ExtensionArray)) + original_dtype = dtype + original_data_type = type(data) + original_data_dtype = getattr(data, "dtype", None) + refs = None name = ibase.maybe_extract_name(name, data, type(self)) + na_value = na_value_for_dtype(pandas_dtype(dtype), compat=False) + + # Series TASK 4: DATA TRANSFORMATIONS. + if isinstance(data, Mapping): + # if is_dict_like(data) and not is_pandas_object and data is not None: + # Dict is SPECIAL case, since it's data has data values and index keys. + + # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] + # raises KeyError). Send it to Series for "standard" construction: + + # index = tuple(data.keys()) consumes more memory (up to 25%). 
+ if data: + data = Series( + data=list(data.values()), + index=data.keys(), + dtype=dtype, + ) + else: + data = None - if index is not None: - index = ensure_index(index) + if is_list_like(data) and not isinstance(data, Sized): + data = list(data) - if dtype is not None: - dtype = self._validate_dtype(dtype) + if ( + (is_scalar(data) or not isinstance(data, Sized)) + and index is None + and data is not None + ): + data = [data] - if data is None: - index = index if index is not None else default_index(0) - if len(index) or dtype is not None: - data = na_value_for_dtype(pandas_dtype(dtype), compat=False) + # Series TASK 5: TRANSFORMATION ON INDEX. There is always an index after this. + original_index = index + if index is None: + if data is None: + index = default_index(0) else: - data = [] + if isinstance(data, (SingleBlockManager, Series)): + index = data.index + else: + index = default_index(len(data)) + else: + index = ensure_index(index) - if isinstance(data, MultiIndex): - raise NotImplementedError( - "initializing a Series from a MultiIndex is not supported" - ) + # Series TASK 6: TRANSFORMATIONS ON DATA. + # REQUIREMENTS FOR COPYING AND MANAGER CREATION (WHEN NEEDED). 
+ list_like_input = False + require_manager = True + fast_path_manager = False + if data is None and len(index): + data = na_value - refs = None - if isinstance(data, Index): + elif isinstance(data, Series): + require_manager = False + copy = True if original_index is None else False + deep = not copy + + if original_index is not None: + data = data.reindex(index) # copy + index = data.index + + data = data._mgr + + elif isinstance(data, SingleBlockManager): + require_manager = False + fast_path_manager = original_index is None and not copy and dtype is None + + elif isinstance(data, Index): if dtype is not None: data = data.astype(dtype) @@ -436,139 +479,72 @@ def __init__( data = data._values copy = False - elif isinstance(data, np.ndarray): - if len(data.dtype): - # GH#13296 we are dealing with a compound dtype, which - # should be treated as 2D - raise ValueError( - "Cannot construct a Series from an ndarray with " - "compound dtype. Use DataFrame instead." - ) - elif isinstance(data, Series): - if index is None: - index = data.index - data = data._mgr.copy(deep=False) - else: - data = data.reindex(index) - copy = False - data = data._mgr - elif isinstance(data, Mapping): - data, index = self._init_dict(data, index, dtype) - dtype = None - copy = False - elif isinstance(data, SingleBlockManager): - if index is None: - index = data.index - elif not data.index.equals(index) or copy: - # GH#19275 SingleBlockManager input should only be called - # internally - raise AssertionError( - "Cannot pass both SingleBlockManager " - "`data` argument and a different " - "`index` argument. `copy` must be False." - ) + elif is_array: + pass - if not allow_mgr: - warnings.warn( - f"Passing a {type(data).__name__} to {type(self).__name__} " - "is deprecated and will raise in a future version. 
" - "Use public APIs instead.", - DeprecationWarning, - stacklevel=2, - ) - allow_mgr = True + elif is_list_like(data): + list_like_input = True - elif isinstance(data, ExtensionArray): - pass - else: - data = com.maybe_iterable_to_list(data) - if is_list_like(data) and not len(data) and dtype is None: - # GH 29405: Pre-2.0, this defaulted to float. + # Series TASK 7: COPYING THE MANAGER. + if require_manager: + # GH 29405: Pre-2.0, this defaulted to float. + default_empty_series = list_like_input and not len(data) and dtype is None + if default_empty_series: dtype = np.dtype(object) - if index is None: - if not is_list_like(data): - data = [data] - index = default_index(len(data)) - elif is_list_like(data): - com.require_length_match(data, index) + # Final requirements + if is_list_like(data): + com.require_length_match(data, index) + + if is_array and copy_arrays: + if copy_arrays: + if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): + data = data.copy() # not np.ndarray.copy(deep=...) - # create/copy the manager - if isinstance(data, SingleBlockManager): - if dtype is not None: - data = data.astype(dtype=dtype, errors="ignore") - elif copy: - data = data.copy() - else: data = sanitize_array(data, index, dtype, copy) data = SingleBlockManager.from_array(data, index, refs=refs) + else: + deep = deep if not fast_path_manager else False + if dtype is not None: + data = data.astype(dtype=dtype, errors="ignore") # Copy the manager + copy = False + + if copy or fast_path_manager: + data = data.copy(deep) + + # Series TASK 8: CREATE THE DATAFRAME NDFrame.__init__(self, data) self.name = name - self._set_axis(0, index) + if not fast_path_manager: + self._set_axis(0, index) + + # Series TASK 9: RAISE WARNINGS + if ( + original_dtype is None + and is_pandas_object + and original_data_dtype == np.object_ + and self.dtype != original_data_dtype + ): + warnings.warn( + "Dtype inference on a pandas object " + "(Series, Index, ExtensionArray) is deprecated. 
The Series " + "constructor will keep the original dtype in the future. " + "Call `infer_objects` on the result to get the old behavior.", + FutureWarning, + stacklevel=find_stack_level(), + ) - if original_dtype is None and is_pandas_object and data_dtype == np.object_: - if self.dtype != data_dtype: + if original_data_type is SingleBlockManager: + if not allow_mgr: warnings.warn( - "Dtype inference on a pandas object " - "(Series, Index, ExtensionArray) is deprecated. The Series " - "constructor will keep the original dtype in the future. " - "Call `infer_objects` on the result to get the old behavior.", - FutureWarning, - stacklevel=find_stack_level(), + f"Passing a {type(data).__name__} to {type(self).__name__} " + "is deprecated and will raise in a future version. " + "Use public APIs instead.", + DeprecationWarning, + stacklevel=2, ) - def _init_dict( - self, data: Mapping, index: Index | None = None, dtype: DtypeObj | None = None - ): - """ - Derive the "_mgr" and "index" attributes of a new Series from a - dictionary input. - - Parameters - ---------- - data : dict or dict-like - Data used to populate the new Series. - index : Index or None, default None - Index for the new Series: if None, use dict keys. - dtype : np.dtype, ExtensionDtype, or None, default None - The dtype for the new Series: if None, infer from data. - - Returns - ------- - _data : BlockManager for the new Series - index : index for the new Series - """ - keys: Index | tuple - - # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] - # raises KeyError), so we iterate the entire dict, and align - if data: - # GH:34717, issue was using zip to extract key and values from data. - # using generators in effects the performance. - # Below is the new way of extracting the keys and values - - keys = tuple(data.keys()) - values = list(data.values()) # Generating list of values- faster way - elif index is not None: - # fastpath for Series(data=None). 
Just use broadcasting a scalar - # instead of reindexing. - if len(index) or dtype is not None: - values = na_value_for_dtype(pandas_dtype(dtype), compat=False) - else: - values = [] - keys = index - else: - keys, values = default_index(0), [] - - # Input is now list-like, so rely on "standard" construction: - s = Series(values, index=keys, dtype=dtype) - - # Now we just make sure the order is respected, if any - if data and index is not None: - s = s.reindex(index) - return s._mgr, s.index - # ---------------------------------------------------------------------- @property diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 68737e86f0c6a..f428ec5d8990b 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1388,6 +1388,12 @@ def test_constructor_dict_nan_key(self, value): ) tm.assert_series_equal(result, expected) + def test_dict_np_nan_equals_floatnan(self): + d = {np.nan: 1} + result = Series(d, index=[float("nan")]) + expected = Series(d) + tm.assert_series_equal(result, expected) + def test_constructor_dict_datetime64_index(self): # GH 9456
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. This is an study on refactoring the series constructor, with emphasis on two points: - clear separation of the tasks; - and reduction of code duplication. In particular, it should noticed that data manipulation on dictionaries and series data were duplicate and it was possible to eliminate the function `self._init_dict()` by reusing logic used to create series objects from other series. Another interesting point is that, after refactoring, it became evident that the current code uses twice as much memory for manipulating dictionaries, as opposed to lists, with the same data set (tests were done locally with 10 million random numbers). In this case, it is possible to reduce the memory usage while creating the Series with dictionaries by up to 25% by avoiding the creation of a tuple from dictionary keys. As mentioned above, this is an study and the main objective of this PR is to check if tests passes on all platforms. Nevertheless, if any core member can take a look on it, I would be glad to receive a feedback. If the community decide that any idea introduced here are useful for integration in the main branch, I would be glad to take them from this work and do separate PR with them. P.S.: This branch diverged a little bit from main, but the only changes introduced so far were related to #57889. Those changes were incorporated on this PR. Regards EDIT: typo and P.S.
https://api.github.com/repos/pandas-dev/pandas/pulls/57952
2024-03-21T16:52:20Z
2024-04-10T13:42:15Z
null
2024-04-11T17:18:34Z
Backport PR #57764 on branch 2.2.x (BUG: PyArrow dtypes were not supported in the interchange protocol)
diff --git a/doc/source/whatsnew/v2.2.2.rst b/doc/source/whatsnew/v2.2.2.rst index 96f210ce6b7b9..54084abab7817 100644 --- a/doc/source/whatsnew/v2.2.2.rst +++ b/doc/source/whatsnew/v2.2.2.rst @@ -14,6 +14,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the a column's type was a pandas nullable on with missing values (:issue:`56702`) +- :meth:`DataFrame.__dataframe__` was producing incorrect data buffers when the a column's type was a pyarrow nullable on with missing values (:issue:`57664`) - .. --------------------------------------------------------------------------- @@ -21,7 +22,8 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- +- :meth:`DataFrame.__dataframe__` was showing bytemask instead of bitmask for ``'string[pyarrow]'`` validity buffer (:issue:`57762`) +- :meth:`DataFrame.__dataframe__` was showing non-null validity buffer (instead of ``None``) ``'string[pyarrow]'`` without missing values (:issue:`57761`) .. --------------------------------------------------------------------------- .. _whatsnew_222.other: diff --git a/pandas/core/interchange/buffer.py b/pandas/core/interchange/buffer.py index 5c97fc17d7070..5d24325e67f62 100644 --- a/pandas/core/interchange/buffer.py +++ b/pandas/core/interchange/buffer.py @@ -12,6 +12,7 @@ if TYPE_CHECKING: import numpy as np + import pyarrow as pa class PandasBuffer(Buffer): @@ -76,3 +77,60 @@ def __repr__(self) -> str: ) + ")" ) + + +class PandasBufferPyarrow(Buffer): + """ + Data in the buffer is guaranteed to be contiguous in memory. + """ + + def __init__( + self, + buffer: pa.Buffer, + *, + length: int, + ) -> None: + """ + Handle pyarrow chunked arrays. + """ + self._buffer = buffer + self._length = length + + @property + def bufsize(self) -> int: + """ + Buffer size in bytes. + """ + return self._buffer.size + + @property + def ptr(self) -> int: + """ + Pointer to start of the buffer as an integer. 
+ """ + return self._buffer.address + + def __dlpack__(self) -> Any: + """ + Represent this structure as DLPack interface. + """ + raise NotImplementedError() + + def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: + """ + Device type and device ID for where the data in the buffer resides. + """ + return (DlpackDeviceType.CPU, None) + + def __repr__(self) -> str: + return ( + "PandasBuffer[pyarrow](" + + str( + { + "bufsize": self.bufsize, + "ptr": self.ptr, + "device": "CPU", + } + ) + + ")" + ) diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py index 7b39403ca1916..d59a3df694bb3 100644 --- a/pandas/core/interchange/column.py +++ b/pandas/core/interchange/column.py @@ -1,6 +1,9 @@ from __future__ import annotations -from typing import Any +from typing import ( + TYPE_CHECKING, + Any, +) import numpy as np @@ -9,15 +12,18 @@ from pandas.errors import NoBufferPresent from pandas.util._decorators import cache_readonly -from pandas.core.dtypes.dtypes import ( +from pandas.core.dtypes.dtypes import BaseMaskedDtype + +import pandas as pd +from pandas import ( ArrowDtype, - BaseMaskedDtype, DatetimeTZDtype, ) - -import pandas as pd from pandas.api.types import is_string_dtype -from pandas.core.interchange.buffer import PandasBuffer +from pandas.core.interchange.buffer import ( + PandasBuffer, + PandasBufferPyarrow, +) from pandas.core.interchange.dataframe_protocol import ( Column, ColumnBuffers, @@ -30,6 +36,9 @@ dtype_to_arrow_c_fmt, ) +if TYPE_CHECKING: + from pandas.core.interchange.dataframe_protocol import Buffer + _NP_KINDS = { "i": DtypeKind.INT, "u": DtypeKind.UINT, @@ -157,6 +166,16 @@ def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]: else: byteorder = dtype.byteorder + if dtype == "bool[pyarrow]": + # return early to avoid the `* 8` below, as this is a bitmask + # rather than a bytemask + return ( + kind, + dtype.itemsize, # pyright: ignore[reportGeneralTypeIssues] + ArrowCTypes.BOOL, + 
byteorder, + ) + return kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), byteorder @property @@ -194,6 +213,12 @@ def describe_null(self): column_null_dtype = ColumnNullType.USE_BYTEMASK null_value = 1 return column_null_dtype, null_value + if isinstance(self._col.dtype, ArrowDtype): + # We already rechunk (if necessary / allowed) upon initialization, so this + # is already single-chunk by the time we get here. + if self._col.array._pa_array.chunks[0].buffers()[0] is None: # type: ignore[attr-defined] + return ColumnNullType.NON_NULLABLE, None + return ColumnNullType.USE_BITMASK, 0 kind = self.dtype[0] try: null, value = _NULL_DESCRIPTION[kind] @@ -278,10 +303,11 @@ def get_buffers(self) -> ColumnBuffers: def _get_data_buffer( self, - ) -> tuple[PandasBuffer, Any]: # Any is for self.dtype tuple + ) -> tuple[Buffer, tuple[DtypeKind, int, str, str]]: """ Return the buffer containing the data and the buffer's associated dtype. """ + buffer: Buffer if self.dtype[0] in ( DtypeKind.INT, DtypeKind.UINT, @@ -291,6 +317,7 @@ def _get_data_buffer( ): # self.dtype[2] is an ArrowCTypes.TIMESTAMP where the tz will make # it longer than 4 characters + dtype = self.dtype if self.dtype[0] == DtypeKind.DATETIME and len(self.dtype[2]) > 4: np_arr = self._col.dt.tz_convert(None).to_numpy() else: @@ -298,11 +325,17 @@ def _get_data_buffer( if isinstance(self._col.dtype, BaseMaskedDtype): np_arr = arr._data # type: ignore[attr-defined] elif isinstance(self._col.dtype, ArrowDtype): - raise NotImplementedError("ArrowDtype not handled yet") + # We already rechunk (if necessary / allowed) upon initialization, + # so this is already single-chunk by the time we get here. 
+ arr = arr._pa_array.chunks[0] # type: ignore[attr-defined] + buffer = PandasBufferPyarrow( + arr.buffers()[1], # type: ignore[attr-defined] + length=len(arr), + ) + return buffer, dtype else: np_arr = arr._ndarray # type: ignore[attr-defined] buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy) - dtype = self.dtype elif self.dtype[0] == DtypeKind.CATEGORICAL: codes = self._col.values._codes buffer = PandasBuffer(codes, allow_copy=self._allow_copy) @@ -330,13 +363,26 @@ def _get_data_buffer( return buffer, dtype - def _get_validity_buffer(self) -> tuple[PandasBuffer, Any]: + def _get_validity_buffer(self) -> tuple[Buffer, Any] | None: """ Return the buffer containing the mask values indicating missing data and the buffer's associated dtype. Raises NoBufferPresent if null representation is not a bit or byte mask. """ null, invalid = self.describe_null + buffer: Buffer + if isinstance(self._col.dtype, ArrowDtype): + # We already rechunk (if necessary / allowed) upon initialization, so this + # is already single-chunk by the time we get here. 
+ arr = self._col.array._pa_array.chunks[0] # type: ignore[attr-defined] + dtype = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, Endianness.NATIVE) + if arr.buffers()[0] is None: + return None + buffer = PandasBufferPyarrow( + arr.buffers()[0], + length=len(arr), + ) + return buffer, dtype if isinstance(self._col.dtype, BaseMaskedDtype): mask = self._col.array._mask # type: ignore[attr-defined] diff --git a/pandas/core/interchange/dataframe.py b/pandas/core/interchange/dataframe.py index 1ffe0e8e8dbb0..1abacddfc7e3b 100644 --- a/pandas/core/interchange/dataframe.py +++ b/pandas/core/interchange/dataframe.py @@ -5,6 +5,7 @@ from pandas.core.interchange.column import PandasColumn from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg +from pandas.core.interchange.utils import maybe_rechunk if TYPE_CHECKING: from collections.abc import ( @@ -34,6 +35,10 @@ def __init__(self, df: DataFrame, allow_copy: bool = True) -> None: """ self._df = df.rename(columns=str, copy=False) self._allow_copy = allow_copy + for i, _col in enumerate(self._df.columns): + rechunked = maybe_rechunk(self._df.iloc[:, i], allow_copy=allow_copy) + if rechunked is not None: + self._df.isetitem(i, rechunked) def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True diff --git a/pandas/core/interchange/from_dataframe.py b/pandas/core/interchange/from_dataframe.py index d45ae37890ba7..4162ebc33f0d6 100644 --- a/pandas/core/interchange/from_dataframe.py +++ b/pandas/core/interchange/from_dataframe.py @@ -295,13 +295,14 @@ def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: null_pos = None if null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK): - assert buffers["validity"], "Validity buffers cannot be empty for masks" - valid_buff, valid_dtype = buffers["validity"] - null_pos = buffer_to_ndarray( - valid_buff, valid_dtype, offset=col.offset, length=col.size() - ) - if sentinel_val == 0: - null_pos = ~null_pos + validity = 
buffers["validity"] + if validity is not None: + valid_buff, valid_dtype = validity + null_pos = buffer_to_ndarray( + valid_buff, valid_dtype, offset=col.offset, length=col.size() + ) + if sentinel_val == 0: + null_pos = ~null_pos # Assemble the strings from the code units str_list: list[None | float | str] = [None] * col.size() @@ -486,6 +487,8 @@ def set_nulls( np.ndarray or pd.Series Data with the nulls being set. """ + if validity is None: + return data null_kind, sentinel_val = col.describe_null null_pos = None diff --git a/pandas/core/interchange/utils.py b/pandas/core/interchange/utils.py index 2e73e560e5740..2a19dd5046aa3 100644 --- a/pandas/core/interchange/utils.py +++ b/pandas/core/interchange/utils.py @@ -16,6 +16,8 @@ DatetimeTZDtype, ) +import pandas as pd + if typing.TYPE_CHECKING: from pandas._typing import DtypeObj @@ -145,3 +147,29 @@ def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str: raise NotImplementedError( f"Conversion of {dtype} to Arrow C format string is not implemented." ) + + +def maybe_rechunk(series: pd.Series, *, allow_copy: bool) -> pd.Series | None: + """ + Rechunk a multi-chunk pyarrow array into a single-chunk array, if necessary. + + - Returns `None` if the input series is not backed by a multi-chunk pyarrow array + (and so doesn't need rechunking) + - Returns a single-chunk-backed-Series if the input is backed by a multi-chunk + pyarrow array and `allow_copy` is `True`. + - Raises a `RuntimeError` if `allow_copy` is `False` and input is a + based by a multi-chunk pyarrow array. + """ + if not isinstance(series.dtype, pd.ArrowDtype): + return None + chunked_array = series.array._pa_array # type: ignore[attr-defined] + if len(chunked_array.chunks) == 1: + return None + if not allow_copy: + raise RuntimeError( + "Found multi-chunk pyarrow array, but `allow_copy` is False. " + "Please rechunk the array before calling this function, or set " + "`allow_copy=True`." 
+ ) + arr = chunked_array.combine_chunks() + return pd.Series(arr, dtype=series.dtype, name=series.name, index=series.index) diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py index a1dedb6be456c..1ccada9116d4c 100644 --- a/pandas/tests/interchange/test_impl.py +++ b/pandas/tests/interchange/test_impl.py @@ -1,4 +1,7 @@ -from datetime import datetime +from datetime import ( + datetime, + timezone, +) import numpy as np import pytest @@ -301,6 +304,51 @@ def test_multi_chunk_pyarrow() -> None: pd.api.interchange.from_dataframe(table, allow_copy=False) +def test_multi_chunk_column() -> None: + pytest.importorskip("pyarrow", "11.0.0") + ser = pd.Series([1, 2, None], dtype="Int64[pyarrow]") + df = pd.concat([ser, ser], ignore_index=True).to_frame("a") + df_orig = df.copy() + with pytest.raises( + RuntimeError, match="Found multi-chunk pyarrow array, but `allow_copy` is False" + ): + pd.api.interchange.from_dataframe(df.__dataframe__(allow_copy=False)) + result = pd.api.interchange.from_dataframe(df.__dataframe__(allow_copy=True)) + # Interchange protocol defaults to creating numpy-backed columns, so currently this + # is 'float64'. + expected = pd.DataFrame({"a": [1.0, 2.0, None, 1.0, 2.0, None]}, dtype="float64") + tm.assert_frame_equal(result, expected) + + # Check that the rechunking we did didn't modify the original DataFrame. 
+ tm.assert_frame_equal(df, df_orig) + assert len(df["a"].array._pa_array.chunks) == 2 + assert len(df_orig["a"].array._pa_array.chunks) == 2 + + +def test_timestamp_ns_pyarrow(): + # GH 56712 + pytest.importorskip("pyarrow", "11.0.0") + timestamp_args = { + "year": 2000, + "month": 1, + "day": 1, + "hour": 1, + "minute": 1, + "second": 1, + } + df = pd.Series( + [datetime(**timestamp_args)], + dtype="timestamp[ns][pyarrow]", + name="col0", + ).to_frame() + + dfi = df.__dataframe__() + result = pd.api.interchange.from_dataframe(dfi)["col0"].item() + + expected = pd.Timestamp(**timestamp_args) + assert result == expected + + @pytest.mark.parametrize("tz", ["UTC", "US/Pacific"]) @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) def test_datetimetzdtype(tz, unit): @@ -403,42 +451,60 @@ def test_non_str_names_w_duplicates(): pd.api.interchange.from_dataframe(dfi, allow_copy=False) -def test_nullable_integers() -> None: - # https://github.com/pandas-dev/pandas/issues/55069 - df = pd.DataFrame({"a": [1]}, dtype="Int8") - expected = pd.DataFrame({"a": [1]}, dtype="int8") - result = pd.api.interchange.from_dataframe(df.__dataframe__()) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.xfail(reason="https://github.com/pandas-dev/pandas/issues/57664") -def test_nullable_integers_pyarrow() -> None: - # https://github.com/pandas-dev/pandas/issues/55069 - df = pd.DataFrame({"a": [1]}, dtype="Int8[pyarrow]") - expected = pd.DataFrame({"a": [1]}, dtype="int8") - result = pd.api.interchange.from_dataframe(df.__dataframe__()) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( ("data", "dtype", "expected_dtype"), [ ([1, 2, None], "Int64", "int64"), + ([1, 2, None], "Int64[pyarrow]", "int64"), + ([1, 2, None], "Int8", "int8"), + ([1, 2, None], "Int8[pyarrow]", "int8"), ( [1, 2, None], "UInt64", "uint64", ), + ( + [1, 2, None], + "UInt64[pyarrow]", + "uint64", + ), ([1.0, 2.25, None], "Float32", "float32"), + ([1.0, 2.25, None], 
"Float32[pyarrow]", "float32"), + ([True, False, None], "boolean[pyarrow]", "bool"), + (["much ado", "about", None], "string[pyarrow_numpy]", "large_string"), + (["much ado", "about", None], "string[pyarrow]", "large_string"), + ( + [datetime(2020, 1, 1), datetime(2020, 1, 2), None], + "timestamp[ns][pyarrow]", + "timestamp[ns]", + ), + ( + [datetime(2020, 1, 1), datetime(2020, 1, 2), None], + "timestamp[us][pyarrow]", + "timestamp[us]", + ), + ( + [ + datetime(2020, 1, 1, tzinfo=timezone.utc), + datetime(2020, 1, 2, tzinfo=timezone.utc), + None, + ], + "timestamp[us, Asia/Kathmandu][pyarrow]", + "timestamp[us, tz=Asia/Kathmandu]", + ), ], ) -def test_pandas_nullable_w_missing_values( +def test_pandas_nullable_with_missing_values( data: list, dtype: str, expected_dtype: str ) -> None: # https://github.com/pandas-dev/pandas/issues/57643 - pytest.importorskip("pyarrow", "11.0.0") + # https://github.com/pandas-dev/pandas/issues/57664 + pa = pytest.importorskip("pyarrow", "11.0.0") import pyarrow.interchange as pai + if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]": + expected_dtype = pa.timestamp("us", "Asia/Kathmandu") + df = pd.DataFrame({"a": data}, dtype=dtype) result = pai.from_dataframe(df.__dataframe__())["a"] assert result.type == expected_dtype @@ -447,6 +513,86 @@ def test_pandas_nullable_w_missing_values( assert result[2].as_py() is None +@pytest.mark.parametrize( + ("data", "dtype", "expected_dtype"), + [ + ([1, 2, 3], "Int64", "int64"), + ([1, 2, 3], "Int64[pyarrow]", "int64"), + ([1, 2, 3], "Int8", "int8"), + ([1, 2, 3], "Int8[pyarrow]", "int8"), + ( + [1, 2, 3], + "UInt64", + "uint64", + ), + ( + [1, 2, 3], + "UInt64[pyarrow]", + "uint64", + ), + ([1.0, 2.25, 5.0], "Float32", "float32"), + ([1.0, 2.25, 5.0], "Float32[pyarrow]", "float32"), + ([True, False, False], "boolean[pyarrow]", "bool"), + (["much ado", "about", "nothing"], "string[pyarrow_numpy]", "large_string"), + (["much ado", "about", "nothing"], "string[pyarrow]", "large_string"), + ( 
+ [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)], + "timestamp[ns][pyarrow]", + "timestamp[ns]", + ), + ( + [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)], + "timestamp[us][pyarrow]", + "timestamp[us]", + ), + ( + [ + datetime(2020, 1, 1, tzinfo=timezone.utc), + datetime(2020, 1, 2, tzinfo=timezone.utc), + datetime(2020, 1, 3, tzinfo=timezone.utc), + ], + "timestamp[us, Asia/Kathmandu][pyarrow]", + "timestamp[us, tz=Asia/Kathmandu]", + ), + ], +) +def test_pandas_nullable_without_missing_values( + data: list, dtype: str, expected_dtype: str +) -> None: + # https://github.com/pandas-dev/pandas/issues/57643 + pa = pytest.importorskip("pyarrow", "11.0.0") + import pyarrow.interchange as pai + + if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]": + expected_dtype = pa.timestamp("us", "Asia/Kathmandu") + + df = pd.DataFrame({"a": data}, dtype=dtype) + result = pai.from_dataframe(df.__dataframe__())["a"] + assert result.type == expected_dtype + assert result[0].as_py() == data[0] + assert result[1].as_py() == data[1] + assert result[2].as_py() == data[2] + + +def test_string_validity_buffer() -> None: + # https://github.com/pandas-dev/pandas/issues/57761 + pytest.importorskip("pyarrow", "11.0.0") + df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]") + result = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"] + assert result is None + + +def test_string_validity_buffer_no_missing() -> None: + # https://github.com/pandas-dev/pandas/issues/57762 + pytest.importorskip("pyarrow", "11.0.0") + df = pd.DataFrame({"a": ["x", None]}, dtype="large_string[pyarrow]") + validity = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"] + assert validity is not None + result = validity[1] + expected = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, "=") + assert result == expected + + def test_empty_dataframe(): # https://github.com/pandas-dev/pandas/issues/56700 df = pd.DataFrame({"a": []}, dtype="int8")
#57764
https://api.github.com/repos/pandas-dev/pandas/pulls/57947
2024-03-21T08:55:32Z
2024-03-21T16:37:58Z
2024-03-21T16:37:58Z
2024-03-21T16:37:58Z
Clean up more Cython warning
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 82e9812094af2..01c7de0c6f2b3 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -1603,7 +1603,7 @@ cdef _categorical_convert(parser_t *parser, int64_t col, # -> ndarray[f'|S{width}'] cdef _to_fw_string(parser_t *parser, int64_t col, int64_t line_start, - int64_t line_end, int64_t width) noexcept: + int64_t line_end, int64_t width): cdef: char *data ndarray result
Overlooked this in the recent PR. I think this is the last of Cython warnings. `(warning: /home/pandas/pandas/_libs/parsers.pyx:1605:26: noexcept clause is ignored for function returning Python object)`
https://api.github.com/repos/pandas-dev/pandas/pulls/57946
2024-03-21T02:27:51Z
2024-03-21T16:16:39Z
2024-03-21T16:16:39Z
2024-03-21T18:28:22Z
PERF: DataFrame(dict) returns RangeIndex columns when possible
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index ef561d50066d1..731195f0b1268 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -264,6 +264,7 @@ Removal of prior version deprecations/changes Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - :attr:`Categorical.categories` returns a :class:`RangeIndex` columns instead of an :class:`Index` if the constructed ``values`` was a ``range``. (:issue:`57787`) +- :class:`DataFrame` returns a :class:`RangeIndex` columns when possible when ``data`` is a ``dict`` (:issue:`57943`) - :func:`concat` returns a :class:`RangeIndex` level in the :class:`MultiIndex` result when ``keys`` is a ``range`` or :class:`RangeIndex` (:issue:`57542`) - :meth:`RangeIndex.append` returns a :class:`RangeIndex` instead of a :class:`Index` when appending values that could continue the :class:`RangeIndex` (:issue:`57467`) - :meth:`Series.str.extract` returns a :class:`RangeIndex` columns instead of an :class:`Index` column when possible (:issue:`57542`) diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index a8887a21afa34..9b05eb42c6d6e 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -1,6 +1,5 @@ from __future__ import annotations -import textwrap from typing import ( TYPE_CHECKING, cast, @@ -23,6 +22,7 @@ ensure_index, ensure_index_from_sequences, get_unanimous_names, + maybe_sequence_to_range, ) from pandas.core.indexes.category import CategoricalIndex from pandas.core.indexes.datetimes import DatetimeIndex @@ -34,16 +34,6 @@ if TYPE_CHECKING: from pandas._typing import Axis -_sort_msg = textwrap.dedent( - """\ -Sorting because non-concatenation axis is not aligned. A future version -of pandas will change to not sort by default. - -To accept the future behavior, pass 'sort=False'. - -To retain the current behavior and silence the warning, pass 'sort=True'. 
-""" -) __all__ = [ @@ -66,6 +56,7 @@ "all_indexes_same", "default_index", "safe_sort_index", + "maybe_sequence_to_range", ] diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 9a537c71f3cd0..e59c0542ee6da 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -7169,18 +7169,17 @@ def maybe_sequence_to_range(sequence) -> Any | range: ------- Any : input or range """ - if isinstance(sequence, (ABCSeries, Index, range)): + if isinstance(sequence, (ABCSeries, Index, range, ExtensionArray)): return sequence - np_sequence = np.asarray(sequence) - if np_sequence.dtype.kind != "i" or len(np_sequence) == 1: + elif len(sequence) == 1 or lib.infer_dtype(sequence, skipna=False) != "integer": return sequence - elif len(np_sequence) == 0: + elif len(sequence) == 0: return range(0) - diff = np_sequence[1] - np_sequence[0] + diff = sequence[1] - sequence[0] if diff == 0: return sequence - elif len(np_sequence) == 2 or lib.is_sequence_range(np_sequence, diff): - return range(np_sequence[0], np_sequence[-1] + diff, diff) + elif len(sequence) == 2 or lib.is_sequence_range(np.asarray(sequence), diff): + return range(sequence[0], sequence[-1] + diff, diff) else: return sequence diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 93f1674fbd328..73b93110c9018 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -60,6 +60,7 @@ default_index, ensure_index, get_objs_combined_axis, + maybe_sequence_to_range, union_indexes, ) from pandas.core.internals.blocks import ( @@ -403,7 +404,7 @@ def dict_to_mgr( arrays[i] = arr else: - keys = list(data.keys()) + keys = maybe_sequence_to_range(list(data.keys())) columns = Index(keys) if keys else default_index(0) arrays = [com.maybe_iterable_to_list(data[k]) for k in keys] diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 7d1a5b4492740..12d8269b640fc 100644 --- 
a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2709,6 +2709,11 @@ def test_inference_on_pandas_objects(self): result = DataFrame({"a": ser}) assert result.dtypes.iloc[0] == np.object_ + def test_dict_keys_returns_rangeindex(self): + result = DataFrame({0: [1], 1: [2]}).columns + expected = RangeIndex(2) + tm.assert_index_equal(result, expected, exact=True) + class TestDataFrameConstructorIndexInference: def test_frame_from_dict_of_series_overlapping_monthly_period_indexes(self): diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 99250dc929997..f750d5e7fa919 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1738,6 +1738,7 @@ def test_daily(self): mask = ts.index.year == y expected[y] = Series(ts.values[mask], index=doy[mask]) expected = DataFrame(expected, dtype=float).T + expected.index = expected.index.astype(np.int32) tm.assert_frame_equal(result, expected) def test_monthly(self): @@ -1753,6 +1754,7 @@ def test_monthly(self): mask = ts.index.year == y expected[y] = Series(ts.values[mask], index=month[mask]) expected = DataFrame(expected, dtype=float).T + expected.index = expected.index.astype(np.int32) tm.assert_frame_equal(result, expected) def test_pivot_table_with_iterator_values(self, data):
Discovered in https://github.com/pandas-dev/pandas/pull/57441 Also removed a seemingly unused `_sort_msg`
https://api.github.com/repos/pandas-dev/pandas/pulls/57943
2024-03-20T23:52:43Z
2024-03-25T18:24:40Z
2024-03-25T18:24:40Z
2024-03-25T18:24:43Z
Backport PR #57029 on branch 2.2.x (DOC: Add `DataFrame.to_numpy` method)
diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst index fefb02dd916cd..1d9019ff22c23 100644 --- a/doc/source/reference/frame.rst +++ b/doc/source/reference/frame.rst @@ -49,6 +49,7 @@ Conversion DataFrame.infer_objects DataFrame.copy DataFrame.bool + DataFrame.to_numpy Indexing, iteration ~~~~~~~~~~~~~~~~~~~
Backport PR #57029: DOC: Add `DataFrame.to_numpy` method
https://api.github.com/repos/pandas-dev/pandas/pulls/57940
2024-03-20T22:22:40Z
2024-03-21T03:05:31Z
2024-03-21T03:05:31Z
2024-03-21T03:05:31Z
DOC: #38067 add missing holiday observance rules
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index ecdfb3c565d33..37413722de96f 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -1468,11 +1468,16 @@ or some other non-observed day. Defined observance rules are: :header: "Rule", "Description" :widths: 15, 70 + "next_workday", "move Saturday and Sunday to Monday" + "previous_workday", "move Saturday and Sunday to Friday" "nearest_workday", "move Saturday to Friday and Sunday to Monday" + "before_nearest_workday", "apply ``nearest_workday`` and then move to previous workday before that day" + "after_nearest_workday", "apply ``nearest_workday`` and then move to next workday after that day" "sunday_to_monday", "move Sunday to following Monday" "next_monday_or_tuesday", "move Saturday to Monday and Sunday/Monday to Tuesday" "previous_friday", move Saturday and Sunday to previous Friday" "next_monday", "move Saturday and Sunday to following Monday" + "weekend_to_monday", "same as ``next_monday``" An example of how holidays and holiday calendars are defined: diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py index 50d0d33f0339f..cc9e2e3be8c38 100644 --- a/pandas/tseries/holiday.py +++ b/pandas/tseries/holiday.py @@ -108,7 +108,7 @@ def nearest_workday(dt: datetime) -> datetime: def next_workday(dt: datetime) -> datetime: """ - returns next weekday used for observances + returns next workday used for observances """ dt += timedelta(days=1) while dt.weekday() > 4: @@ -119,7 +119,7 @@ def next_workday(dt: datetime) -> datetime: def previous_workday(dt: datetime) -> datetime: """ - returns previous weekday used for observances + returns previous workday used for observances """ dt -= timedelta(days=1) while dt.weekday() > 4: @@ -130,7 +130,7 @@ def previous_workday(dt: datetime) -> datetime: def before_nearest_workday(dt: datetime) -> datetime: """ - returns previous workday after nearest workday + returns previous 
workday before nearest workday """ return previous_workday(nearest_workday(dt))
- [x] closes #38067 Updated docs with missing functions. `weekend_to_monday` and `next_monday` do indeed do the exact same thing, and I agree that `next_monday` should be deprecated. Any thoughts?
https://api.github.com/repos/pandas-dev/pandas/pulls/57939
2024-03-20T22:04:10Z
2024-03-21T16:22:47Z
2024-03-21T16:22:47Z
2024-03-22T00:05:40Z
BUG: #29049 make holiday support offsets of offsets
diff --git a/pandas/tests/tseries/holiday/test_holiday.py b/pandas/tests/tseries/holiday/test_holiday.py index b2eefd04ef93b..08f4a1250392e 100644 --- a/pandas/tests/tseries/holiday/test_holiday.py +++ b/pandas/tests/tseries/holiday/test_holiday.py @@ -271,6 +271,25 @@ def test_both_offset_observance_raises(): ) +def test_list_of_list_of_offsets_raises(): + # see gh-29049 + # Test that the offsets of offsets are forbidden + holiday1 = Holiday( + "Holiday1", + month=USThanksgivingDay.month, + day=USThanksgivingDay.day, + offset=[USThanksgivingDay.offset, DateOffset(1)], + ) + msg = "Only BaseOffsets and flat lists of them are supported for offset." + with pytest.raises(ValueError, match=msg): + Holiday( + "Holiday2", + month=holiday1.month, + day=holiday1.day, + offset=[holiday1.offset, DateOffset(3)], + ) + + def test_half_open_interval_with_observance(): # Prompted by GH 49075 # Check for holidays that have a half-open date interval where diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py index cc9e2e3be8c38..8e51183138b5c 100644 --- a/pandas/tseries/holiday.py +++ b/pandas/tseries/holiday.py @@ -4,6 +4,7 @@ datetime, timedelta, ) +from typing import Callable import warnings from dateutil.relativedelta import ( @@ -17,6 +18,7 @@ ) import numpy as np +from pandas._libs.tslibs.offsets import BaseOffset from pandas.errors import PerformanceWarning from pandas import ( @@ -159,24 +161,34 @@ def __init__( year=None, month=None, day=None, - offset=None, - observance=None, + offset: BaseOffset | list[BaseOffset] | None = None, + observance: Callable | None = None, start_date=None, end_date=None, - days_of_week=None, + days_of_week: tuple | None = None, ) -> None: """ Parameters ---------- name : str Name of the holiday , defaults to class name - offset : array of pandas.tseries.offsets or - class from pandas.tseries.offsets - computes offset from date - observance: function - computes when holiday is given a pandas Timestamp - days_of_week: - provide a 
tuple of days e.g (0,1,2,3,) for Monday Through Thursday + year : int, default None + Year of the holiday + month : int, default None + Month of the holiday + day : int, default None + Day of the holiday + offset : list of pandas.tseries.offsets or + class from pandas.tseries.offsets, default None + Computes offset from date + observance : function, default None + Computes when holiday is given a pandas Timestamp + start_date : datetime-like, default None + First date the holiday is observed + end_date : datetime-like, default None + Last date the holiday is observed + days_of_week : tuple of int or dateutil.relativedelta weekday strs, default None + Provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday Monday=0,..,Sunday=6 Examples @@ -216,8 +228,19 @@ class from pandas.tseries.offsets >>> July3rd Holiday: July 3rd (month=7, day=3, ) """ - if offset is not None and observance is not None: - raise NotImplementedError("Cannot use both offset and observance.") + if offset is not None: + if observance is not None: + raise NotImplementedError("Cannot use both offset and observance.") + if not ( + isinstance(offset, BaseOffset) + or ( + isinstance(offset, list) + and all(isinstance(off, BaseOffset) for off in offset) + ) + ): + raise ValueError( + "Only BaseOffsets and flat lists of them are supported for offset." + ) self.name = name self.year = year
- [x] closes #29049 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v3.0.0.rst` file if fixing a bug or adding a new feature. The summing of offsets is generally not possible, so the bug is fixed by simply flattening the asymmetric list of lists of offsets that one might easily end up with when defining holidays in relation to each other. In theory it is possible to sum consecutive `DataOffset(n, weekday=None)` in the offset list, but I did not want to introduce even more complexity for such little gain. Allowing asymmetrical lists of lists and their element dtypes as parameters is not very pretty, but the only solution to this problem that does not involve a major refactoring of the offset classes to allow representation of lists of offsets in a single composite offset e.g. ["TUE(1)", 3]. Summing of composite offsets is not possible because additions between weekday and regular day shift offsets are not associative, not distributive, and not commutative. Another option would be to just enhance the error message given to the end user when trying to chain offsets in the manner shown in the issue and let them do the aggregation to a simple list of offsets themselves.
https://api.github.com/repos/pandas-dev/pandas/pulls/57938
2024-03-20T21:39:57Z
2024-03-28T00:08:54Z
2024-03-28T00:08:54Z
2024-03-28T00:51:29Z
Fix tagging within Dockerfile
diff --git a/Dockerfile b/Dockerfile index 03f76f39b8cc7..0fcbcee92295c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,4 +11,5 @@ RUN apt-get install -y libhdf5-dev libgles2-mesa-dev RUN python -m pip install --upgrade pip COPY requirements-dev.txt /tmp RUN python -m pip install -r /tmp/requirements-dev.txt +RUN git config --global --add safe.directory /home/pandas CMD ["/bin/bash"]
When the user within a docker container does not match the user on the host machine (this is the case by default on Linux, not sure of all OSes) you cannot use git to inspect the worktree: ```sh root@6de0debf3870:/home/pandas# git log fatal: detected dubious ownership in repository at '/home/pandas' To add an exception for this directory, call: git config --global --add safe.directory /home/pandas ``` This prevents builds within docker from being tagged with the appropriate git revision
https://api.github.com/repos/pandas-dev/pandas/pulls/57935
2024-03-20T21:07:30Z
2024-03-20T23:21:09Z
2024-03-20T23:21:09Z
2024-03-20T23:21:16Z
REF: Clean up concat statefullness and validation
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 1f0fe0542a0c0..35a08e0167924 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -17,10 +17,7 @@ from pandas.util._decorators import cache_readonly -from pandas.core.dtypes.common import ( - is_bool, - is_iterator, -) +from pandas.core.dtypes.common import is_bool from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, @@ -423,11 +420,12 @@ def __init__( self.ignore_index = ignore_index self.verify_integrity = verify_integrity - objs, keys = self._clean_keys_and_objs(objs, keys) + objs, keys, ndims = _clean_keys_and_objs(objs, keys) - # figure out what our result ndim is going to be - ndims = self._get_ndims(objs) - sample, objs = self._get_sample_object(objs, ndims, keys, names, levels) + # select an object to be our result reference + sample, objs = _get_sample_object( + objs, ndims, keys, names, levels, self.intersect + ) # Standardize axis parameter to int if sample.ndim == 1: @@ -458,100 +456,6 @@ def __init__( self.names = names or getattr(keys, "names", None) self.levels = levels - def _get_ndims(self, objs: list[Series | DataFrame]) -> set[int]: - # figure out what our result ndim is going to be - ndims = set() - for obj in objs: - if not isinstance(obj, (ABCSeries, ABCDataFrame)): - msg = ( - f"cannot concatenate object of type '{type(obj)}'; " - "only Series and DataFrame objs are valid" - ) - raise TypeError(msg) - - ndims.add(obj.ndim) - return ndims - - def _clean_keys_and_objs( - self, - objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], - keys, - ) -> tuple[list[Series | DataFrame], Index | None]: - if isinstance(objs, abc.Mapping): - if keys is None: - keys = list(objs.keys()) - objs_list = [objs[k] for k in keys] - else: - objs_list = list(objs) - - if len(objs_list) == 0: - raise ValueError("No objects to concatenate") - - if keys is None: - objs_list = 
list(com.not_none(*objs_list)) - else: - # GH#1649 - key_indices = [] - clean_objs = [] - if is_iterator(keys): - keys = list(keys) - if len(keys) != len(objs_list): - # GH#43485 - raise ValueError( - f"The length of the keys ({len(keys)}) must match " - f"the length of the objects to concatenate ({len(objs_list)})" - ) - for i, obj in enumerate(objs_list): - if obj is not None: - key_indices.append(i) - clean_objs.append(obj) - objs_list = clean_objs - - if not isinstance(keys, Index): - keys = Index(keys) - - if len(key_indices) < len(keys): - keys = keys.take(key_indices) - - if len(objs_list) == 0: - raise ValueError("All objects passed were None") - - return objs_list, keys - - def _get_sample_object( - self, - objs: list[Series | DataFrame], - ndims: set[int], - keys, - names, - levels, - ) -> tuple[Series | DataFrame, list[Series | DataFrame]]: - # get the sample - # want the highest ndim that we have, and must be non-empty - # unless all objs are empty - sample: Series | DataFrame | None = None - if len(ndims) > 1: - max_ndim = max(ndims) - for obj in objs: - if obj.ndim == max_ndim and np.sum(obj.shape): - sample = obj - break - - else: - # filter out the empties if we have not multi-index possibilities - # note to keep empty Series as it affect to result columns / name - non_empties = [obj for obj in objs if sum(obj.shape) > 0 or obj.ndim == 1] - - if len(non_empties) and ( - keys is None and names is None and levels is None and not self.intersect - ): - objs = non_empties - sample = objs[0] - - if sample is None: - sample = objs[0] - return sample, objs - def _sanitize_mixed_ndim( self, objs: list[Series | DataFrame], @@ -664,29 +568,24 @@ def get_result(self): out = sample._constructor_from_mgr(new_data, axes=new_data.axes) return out.__finalize__(self, method="concat") - def _get_result_dim(self) -> int: - if self._is_series and self.bm_axis == 1: - return 2 - else: - return self.objs[0].ndim - @cache_readonly def new_axes(self) -> list[Index]: - ndim 
= self._get_result_dim() + if self._is_series and self.bm_axis == 1: + ndim = 2 + else: + ndim = self.objs[0].ndim return [ - self._get_concat_axis if i == self.bm_axis else self._get_comb_axis(i) + self._get_concat_axis + if i == self.bm_axis + else get_objs_combined_axis( + self.objs, + axis=self.objs[0]._get_block_manager_axis(i), + intersect=self.intersect, + sort=self.sort, + ) for i in range(ndim) ] - def _get_comb_axis(self, i: AxisInt) -> Index: - data_axis = self.objs[0]._get_block_manager_axis(i) - return get_objs_combined_axis( - self.objs, - axis=data_axis, - intersect=self.intersect, - sort=self.sort, - ) - @cache_readonly def _get_concat_axis(self) -> Index: """ @@ -747,6 +646,98 @@ def _maybe_check_integrity(self, concat_index: Index) -> None: raise ValueError(f"Indexes have overlapping values: {overlap}") +def _clean_keys_and_objs( + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + keys, +) -> tuple[list[Series | DataFrame], Index | None, set[int]]: + """ + Returns + ------- + clean_objs : list[Series | DataFrame] + LIst of DataFrame and Series with Nones removed. + keys : Index | None + None if keys was None + Index if objs was a Mapping or keys was not None. Filtered where objs was None. + ndim : set[int] + Unique .ndim attribute of obj encountered. 
+ """ + if isinstance(objs, abc.Mapping): + if keys is None: + keys = objs.keys() + objs_list = [objs[k] for k in keys] + else: + objs_list = list(objs) + + if len(objs_list) == 0: + raise ValueError("No objects to concatenate") + + if keys is not None: + if not isinstance(keys, Index): + keys = Index(keys) + if len(keys) != len(objs_list): + # GH#43485 + raise ValueError( + f"The length of the keys ({len(keys)}) must match " + f"the length of the objects to concatenate ({len(objs_list)})" + ) + + # GH#1649 + key_indices = [] + clean_objs = [] + ndims = set() + for i, obj in enumerate(objs_list): + if obj is None: + continue + elif isinstance(obj, (ABCSeries, ABCDataFrame)): + key_indices.append(i) + clean_objs.append(obj) + ndims.add(obj.ndim) + else: + msg = ( + f"cannot concatenate object of type '{type(obj)}'; " + "only Series and DataFrame objs are valid" + ) + raise TypeError(msg) + + if keys is not None and len(key_indices) < len(keys): + keys = keys.take(key_indices) + + if len(clean_objs) == 0: + raise ValueError("All objects passed were None") + + return clean_objs, keys, ndims + + +def _get_sample_object( + objs: list[Series | DataFrame], + ndims: set[int], + keys, + names, + levels, + intersect: bool, +) -> tuple[Series | DataFrame, list[Series | DataFrame]]: + # get the sample + # want the highest ndim that we have, and must be non-empty + # unless all objs are empty + if len(ndims) > 1: + max_ndim = max(ndims) + for obj in objs: + if obj.ndim == max_ndim and sum(obj.shape): # type: ignore[arg-type] + return obj, objs + elif keys is None and names is None and levels is None and not intersect: + # filter out the empties if we have not multi-index possibilities + # note to keep empty Series as it affect to result columns / name + if ndims.pop() == 2: + non_empties = [obj for obj in objs if sum(obj.shape)] + else: + non_empties = objs + + if len(non_empties): + return non_empties[0], non_empties + + return objs[0], objs + + def _concat_indexes(indexes) -> 
Index: return indexes[0].append(indexes[1:])
* Removed an additional pass over `objs` during validation * Removed methods off of `_Concatenator` that did not rely on `self`
https://api.github.com/repos/pandas-dev/pandas/pulls/57933
2024-03-20T17:59:59Z
2024-03-20T21:00:44Z
2024-03-20T21:00:44Z
2024-03-20T21:00:47Z
DOC: fix minor typos and grammar missing_data.rst
diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst index aea7688c062b8..2e104ac06f9f4 100644 --- a/doc/source/user_guide/missing_data.rst +++ b/doc/source/user_guide/missing_data.rst @@ -88,7 +88,7 @@ To detect these missing value, use the :func:`isna` or :func:`notna` methods. .. warning:: - Experimental: the behaviour of :class:`NA`` can still change without warning. + Experimental: the behaviour of :class:`NA` can still change without warning. Starting from pandas 1.0, an experimental :class:`NA` value (singleton) is available to represent scalar missing values. The goal of :class:`NA` is provide a @@ -105,7 +105,7 @@ dtype, it will use :class:`NA`: s[2] s[2] is pd.NA -Currently, pandas does not yet use those data types using :class:`NA` by default +Currently, pandas does not use those data types using :class:`NA` by default in a :class:`DataFrame` or :class:`Series`, so you need to specify the dtype explicitly. An easy way to convert to those dtypes is explained in the :ref:`conversion section <missing_data.NA.conversion>`. @@ -253,8 +253,8 @@ Conversion ^^^^^^^^^^ If you have a :class:`DataFrame` or :class:`Series` using ``np.nan``, -:meth:`Series.convert_dtypes` and :meth:`DataFrame.convert_dtypes` -in :class:`DataFrame` that can convert data to use the data types that use :class:`NA` +:meth:`DataFrame.convert_dtypes` and :meth:`Series.convert_dtypes`, respectively, +will convert your data to use the nullable data types supporting :class:`NA`, such as :class:`Int64Dtype` or :class:`ArrowDtype`. This is especially helpful after reading in data sets from IO methods where data types were inferred.
I did not make a corresponding issue for this minor improvement.
https://api.github.com/repos/pandas-dev/pandas/pulls/57929
2024-03-20T13:26:20Z
2024-03-20T16:32:14Z
2024-03-20T16:32:14Z
2024-03-21T17:17:04Z
BUG: Offsets of offsets in holiday raise type error during usage
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index 16be9e0a4fc34..84c5142619b37 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -280,6 +280,7 @@ Performance improvements Bug fixes ~~~~~~~~~ +- Fixed bug in :class:`Series.Holiday` that leads to a ``TypeError`` when using ``Holiday.dates`` with a ``Holiday`` that is initialized with ``offset`` of type ``list`` which itself references another Holiday's list of offsets. (:issue:`29049`) - Fixed bug in :class:`SparseDtype` for equal comparison with na fill value. (:issue:`54770`) - Fixed bug in :meth:`DataFrame.join` inconsistently setting result index name (:issue:`55815`) - Fixed bug in :meth:`DataFrame.to_string` that raised ``StopIteration`` with nested DataFrames. (:issue:`16098`) diff --git a/pandas/tests/tseries/holiday/test_holiday.py b/pandas/tests/tseries/holiday/test_holiday.py index b2eefd04ef93b..da8f2a06855d3 100644 --- a/pandas/tests/tseries/holiday/test_holiday.py +++ b/pandas/tests/tseries/holiday/test_holiday.py @@ -6,6 +6,7 @@ from pandas import ( DatetimeIndex, Series, + to_datetime, ) import pandas._testing as tm @@ -198,6 +199,33 @@ def test_holidays_within_dates(holiday, start, expected): ) == [utc.localize(dt) for dt in expected] +def test_holidays_within_dates_offset_of_offset(): + # see gh-29049 + # Test that the offset of an offset is correctly applied to the holiday + # And that dates can be calculated + holiday1 = Holiday( + "Holiday1", + month=USThanksgivingDay.month, + day=USThanksgivingDay.day, + offset=[USThanksgivingDay.offset, DateOffset(1)], + ) + holiday2 = Holiday( + "Holiday2", + month=holiday1.month, + day=holiday1.day, + offset=[holiday1.offset, DateOffset(3)], + ) + # there shall be no lists of lists here + for offset in holiday2.offset: + assert isinstance(offset, DateOffset) + + min_date, max_date = (to_datetime(x) for x in ["2017-11-1", "2018-11-30"]) + expected_min, expected_max = 
DatetimeIndex(["2017-11-27", "2018-11-26"]) + actual_min, actual_max = holiday2.dates(min_date, max_date) + assert actual_min == expected_min + assert actual_max == expected_max + + @pytest.mark.parametrize( "transform", [lambda x: x.strftime("%Y-%m-%d"), lambda x: Timestamp(x)] ) diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py index 50d0d33f0339f..3da812c77b86f 100644 --- a/pandas/tseries/holiday.py +++ b/pandas/tseries/holiday.py @@ -4,6 +4,11 @@ datetime, timedelta, ) +from typing import ( + TYPE_CHECKING, + Callable, + SupportsIndex, +) import warnings from dateutil.relativedelta import ( @@ -33,6 +38,9 @@ Easter, ) +if TYPE_CHECKING: + from pandas._libs.tslibs.offsets import BaseOffset + def next_monday(dt: datetime) -> datetime: """ @@ -156,25 +164,33 @@ class Holiday: def __init__( self, name: str, - year=None, - month=None, - day=None, - offset=None, - observance=None, + year: SupportsIndex | None = None, + month: SupportsIndex | None = None, + day: SupportsIndex | None = None, + offset: None | BaseOffset | list[BaseOffset | list[BaseOffset]] = None, + observance: Callable | None = None, start_date=None, end_date=None, - days_of_week=None, + days_of_week: tuple | None = None, ) -> None: """ Parameters ---------- name : str Name of the holiday , defaults to class name + year: int + Year of the holiday + month: int + Month of the holiday + day: int + Day of the holiday offset : array of pandas.tseries.offsets or class from pandas.tseries.offsets computes offset from date observance: function computes when holiday is given a pandas Timestamp + start_date : datetime-like, optional + end_date : datetime-like, optional days_of_week: provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday Monday=0,..,Sunday=6 @@ -223,7 +239,18 @@ class from pandas.tseries.offsets self.year = year self.month = month self.day = day - self.offset = offset + if isinstance(offset, list): + self.offset = [] + for off in offset: + # check if we are handling 
offsets of another holiday + if isinstance(off, list): + self.offset.extend(np.ravel(off)) + else: + # otherwise it should be a DateOffset, we do not support other + # array-like types + self.offset.append(off) + else: + self.offset = offset self.start_date = ( Timestamp(start_date) if start_date is not None else start_date )
- [x] closes #29049 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/v3.0.0.rst` file if fixing a bug or adding a new feature. Still needs some work on the typing since I am touching things that have been working implicitly since forever, but that mypi does not find acceptable. Type annotations are hard to get consistent here without changing the behaviour of `Holiday` and without ugly workarounds.
https://api.github.com/repos/pandas-dev/pandas/pulls/57921
2024-03-19T22:50:10Z
2024-03-20T11:15:16Z
null
2024-03-20T11:15:16Z
CLN: Remove unused code
diff --git a/pandas/conftest.py b/pandas/conftest.py index 9302c581fd497..50a94b35c2edc 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1231,10 +1231,6 @@ def tz_aware_fixture(request): return request.param -# Generate cartesian product of tz_aware_fixture: -tz_aware_fixture2 = tz_aware_fixture - - _UTCS = ["utc", "dateutil/UTC", utc, tzutc(), timezone.utc] if zoneinfo is not None: _UTCS.append(zoneinfo.ZoneInfo("UTC")) diff --git a/pandas/core/base.py b/pandas/core/base.py index 33b37319675ae..987136ffdff7d 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -87,12 +87,6 @@ _shared_docs: dict[str, str] = {} -_indexops_doc_kwargs = { - "klass": "IndexOpsMixin", - "inplace": "", - "unique": "IndexOpsMixin", - "duplicated": "IndexOpsMixin", -} class PandasObject(DirNamesMixin): diff --git a/pandas/core/dtypes/astype.py b/pandas/core/dtypes/astype.py index d351d13fdfeb6..086f7d2da6640 100644 --- a/pandas/core/dtypes/astype.py +++ b/pandas/core/dtypes/astype.py @@ -37,8 +37,6 @@ from pandas.core.arrays import ExtensionArray -_dtype_obj = np.dtype(object) - @overload def _astype_nansafe(
Some more unused code. Most of them are not detected by `vulture` but only visible on the editor through Pylance, so I'm not quite sure how to detect them systematically.
https://api.github.com/repos/pandas-dev/pandas/pulls/57920
2024-03-19T22:30:25Z
2024-03-20T00:23:14Z
2024-03-20T00:23:14Z
2024-03-20T00:23:20Z
Remove Cython warnings
diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd index e4ac3a9e167a3..a5822e57d3fa6 100644 --- a/pandas/_libs/tslibs/util.pxd +++ b/pandas/_libs/tslibs/util.pxd @@ -185,11 +185,11 @@ cdef inline const char* get_c_string(str py_string) except NULL: return get_c_string_buf_and_size(py_string, NULL) -cdef inline bytes string_encode_locale(str py_string) noexcept: +cdef inline bytes string_encode_locale(str py_string): """As opposed to PyUnicode_Encode, use current system locale to encode.""" return PyUnicode_EncodeLocale(py_string, NULL) -cdef inline object char_to_string_locale(const char* data) noexcept: +cdef inline object char_to_string_locale(const char* data): """As opposed to PyUnicode_FromString, use current system locale to decode.""" return PyUnicode_DecodeLocale(data, NULL)
warning: /Users/runner/work/pandas/pandas/pandas/_libs/tslibs/util.pxd:188:38: noexcept clause is ignored for function returning Python object warning: /Users/runner/work/pandas/pandas/pandas/_libs/tslibs/util.pxd:193:40: noexcept clause is ignored for function returning Python object
https://api.github.com/repos/pandas-dev/pandas/pulls/57919
2024-03-19T22:19:43Z
2024-03-20T00:22:29Z
2024-03-20T00:22:29Z
2024-03-20T00:22:35Z
STYLE: Detect unnecessary pylint ignore
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 190ea32203807..41f1c4c6892a3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -78,7 +78,7 @@ repos: hooks: - id: pylint stages: [manual] - args: [--load-plugins=pylint.extensions.redefined_loop_name] + args: [--load-plugins=pylint.extensions.redefined_loop_name, --fail-on=I0021] - id: pylint alias: redefined-outer-name name: Redefining name from outer scope diff --git a/pandas/core/groupby/indexing.py b/pandas/core/groupby/indexing.py index 75c0a062b57d0..c658f625d5ea9 100644 --- a/pandas/core/groupby/indexing.py +++ b/pandas/core/groupby/indexing.py @@ -114,7 +114,6 @@ def _positional_selector(self) -> GroupByPositionalSelector: 4 b 5 """ if TYPE_CHECKING: - # pylint: disable-next=used-before-assignment groupby_self = cast(groupby.GroupBy, self) else: groupby_self = self diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index b8b1d39d4eb20..2aeb1aff07a54 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -993,7 +993,6 @@ def to_datetime( errors=errors, exact=exact, ) - # pylint: disable-next=used-before-assignment result: Timestamp | NaTType | Series | Index if isinstance(arg, Timestamp): diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index b9d5f04cb203b..1de53993fe646 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -1311,7 +1311,6 @@ def test_to_latex_multiindex_names(self, name0, name1, axes): ) col_names = [n if (bool(n) and 1 in axes) else "" for n in names] observed = df.to_latex(multirow=False) - # pylint: disable-next=consider-using-f-string expected = r"""\begin{tabular}{llrrrr} \toprule & %s & \multicolumn{2}{r}{1} & \multicolumn{2}{r}{2} \\ diff --git a/pandas/tests/series/test_iteration.py b/pandas/tests/series/test_iteration.py index edc82455234bb..1e0fa7fae107e 100644 --- 
a/pandas/tests/series/test_iteration.py +++ b/pandas/tests/series/test_iteration.py @@ -4,12 +4,10 @@ def test_keys(self, datetime_series): def test_iter_datetimes(self, datetime_series): for i, val in enumerate(datetime_series): - # pylint: disable-next=unnecessary-list-index-lookup assert val == datetime_series.iloc[i] def test_iter_strings(self, string_series): for i, val in enumerate(string_series): - # pylint: disable-next=unnecessary-list-index-lookup assert val == string_series.iloc[i] def test_iteritems_datetimes(self, datetime_series):
Enable `pylint`'s `I0021`/`useless-suppression` ([doc](https://pylint.pycqa.org/en/latest/user_guide/messages/information/useless-suppression.html)) This should make it easier to recognize how many `pylint` rules that `ruff` is still missing.
https://api.github.com/repos/pandas-dev/pandas/pulls/57918
2024-03-19T21:52:17Z
2024-03-19T22:20:15Z
2024-03-19T22:20:15Z
2024-03-19T22:27:28Z
DOC: Update docs with the use of meson instead of setup.py
diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst index 5d833dca50732..f6ff95aa72c6c 100644 --- a/doc/source/development/maintaining.rst +++ b/doc/source/development/maintaining.rst @@ -151,7 +151,7 @@ and then run:: git bisect start git bisect good v1.4.0 git bisect bad v1.5.0 - git bisect run bash -c "python setup.py build_ext -j 4; python t.py" + git bisect run bash -c "python -m pip install -ve . --no-build-isolation --config-settings editable-verbose=true; python t.py" This finds the first commit that changed the behavior. The C extensions have to be rebuilt at every step, so the search can take a while. @@ -159,7 +159,7 @@ rebuilt at every step, so the search can take a while. Exit bisect and rebuild the current version:: git bisect reset - python setup.py build_ext -j 4 + python -m pip install -ve . --no-build-isolation --config-settings editable-verbose=true Report your findings under the corresponding issue and ping the commit author to get their input. diff --git a/pandas/__init__.py b/pandas/__init__.py index f7ae91dd847f7..3ee6f6abf97bf 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -28,7 +28,8 @@ raise ImportError( f"C extension: {_module} not built. If you want to import " "pandas from the source directory, you may need to run " - "'python setup.py build_ext' to build the C extensions first." + "'python -m pip install -ve . --no-build-isolation --config-settings " + "editable-verbose=true' to build the C extensions first." ) from _err from pandas._config import (
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Not sure if this is still needed: https://github.com/pandas-dev/pandas/blob/main/gitpod/gitpod.Dockerfile#L38C5-L38C45
https://api.github.com/repos/pandas-dev/pandas/pulls/57917
2024-03-19T19:22:14Z
2024-03-22T14:57:06Z
2024-03-22T14:57:06Z
2024-03-22T15:11:13Z
DOC: Getting started tutorials css adjustments
diff --git a/doc/source/_static/css/getting_started.css b/doc/source/_static/css/getting_started.css index 0d53bbde94ae3..b02311eb66080 100644 --- a/doc/source/_static/css/getting_started.css +++ b/doc/source/_static/css/getting_started.css @@ -248,6 +248,7 @@ ul.task-bullet > li > p:first-child { } .tutorial-card .card-header { + --bs-card-cap-color: var(--pst-color-text-base); cursor: pointer; background-color: var(--pst-color-surface); border: 1px solid var(--pst-color-border) @@ -269,7 +270,7 @@ ul.task-bullet > li > p:first-child { .tutorial-card .gs-badge-link a { - color: var(--pst-color-text-base); + color: var(--pst-color-primary-text); text-decoration: none; }
- [x] closes #57912 - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). I implemented what I suggested in the issue.
https://api.github.com/repos/pandas-dev/pandas/pulls/57916
2024-03-19T18:54:56Z
2024-03-20T16:39:36Z
2024-03-20T16:39:36Z
2024-03-21T08:09:40Z
BUG: pretty print all Mappings, not just dicts
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index cb211b0b72dce..f3fcdcdb79ed6 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -357,6 +357,7 @@ MultiIndex I/O ^^^ - Bug in :meth:`DataFrame.to_excel` when writing empty :class:`DataFrame` with :class:`MultiIndex` on both axes (:issue:`57696`) +- Now all ``Mapping`` s are pretty printed correctly. Before only literal ``dict`` s were. (:issue:`57915`) - - diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index 214d1d7079fdb..0bd4f2935f4d0 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -187,8 +187,8 @@ def pprint_thing( _nest_lvl : internal use only. pprint_thing() is mutually-recursive with pprint_sequence, this argument is used to keep track of the current nesting level, and limit it. - escape_chars : list or dict, optional - Characters to escape. If a dict is passed the values are the + escape_chars : list[str] or Mapping[str, str], optional + Characters to escape. 
If a Mapping is passed the values are the replacements default_escapes : bool, default False Whether the input escape characters replaces or adds to the defaults @@ -204,11 +204,11 @@ def as_escaped_string( thing: Any, escape_chars: EscapeChars | None = escape_chars ) -> str: translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"} - if isinstance(escape_chars, dict): + if isinstance(escape_chars, Mapping): if default_escapes: translate.update(escape_chars) else: - translate = escape_chars + translate = escape_chars # type: ignore[assignment] escape_chars = list(escape_chars.keys()) else: escape_chars = escape_chars or () @@ -220,7 +220,7 @@ def as_escaped_string( if hasattr(thing, "__next__"): return str(thing) - elif isinstance(thing, dict) and _nest_lvl < get_option( + elif isinstance(thing, Mapping) and _nest_lvl < get_option( "display.pprint_nest_depth" ): result = _pprint_dict( diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py index acf2bc72c687d..1009dfec53218 100644 --- a/pandas/tests/io/formats/test_printing.py +++ b/pandas/tests/io/formats/test_printing.py @@ -1,5 +1,6 @@ # Note! This file is aimed specifically at pandas.io.formats.printing utility # functions, not the general printing of pandas objects. 
+from collections.abc import Mapping import string import pandas._config.config as cf @@ -16,6 +17,17 @@ def test_adjoin(): assert adjoined == expected +class MyMapping(Mapping): + def __getitem__(self, key): + return 4 + + def __iter__(self): + return iter(["a", "b"]) + + def __len__(self): + return 2 + + class TestPPrintThing: def test_repr_binary_type(self): letters = string.ascii_letters @@ -42,6 +54,12 @@ def test_repr_obeys_max_seq_limit(self): def test_repr_set(self): assert printing.pprint_thing({1}) == "{1}" + def test_repr_dict(self): + assert printing.pprint_thing({"a": 4, "b": 4}) == "{'a': 4, 'b': 4}" + + def test_repr_mapping(self): + assert printing.pprint_thing(MyMapping()) == "{'a': 4, 'b': 4}" + class TestFormatBase: def test_adjoin(self):
This was discovered in https://github.com/ibis-project/ibis/issues/8687 Due to a strict `isinstance(x, dict)` check, custom mapping types would instead get treated as mere iterables, so only the keys would get printed as a tuple: ```python import pandas as pd from ibis.common.collections import frozendict pd.Series([frozendict({"a": 5, "b": 6})]) # 0 (a, b) # dtype: object ``` - [x] NA: closes #xxxx - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] NA Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57915
2024-03-19T18:53:53Z
2024-03-20T00:23:31Z
2024-03-20T00:23:31Z
2024-03-20T00:23:37Z
REF/PERF: Use concat(..., ignore_index=True) when index doesn't matter
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 60529c1c2251b..429dc9236cf45 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2622,7 +2622,7 @@ def describe(self) -> DataFrame: from pandas import Index from pandas.core.reshape.concat import concat - result = concat([counts, freqs], axis=1) + result = concat([counts, freqs], ignore_index=True, axis=1) result.columns = Index(["counts", "freqs"]) result.index.name = "categories" diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 3b20b854b344e..361e9e87fadb8 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -574,7 +574,7 @@ def _transform_general( if results: from pandas.core.reshape.concat import concat - concatenated = concat(results) + concatenated = concat(results, ignore_index=True) result = self._set_result_index_ordered(concatenated) else: result = self.obj._constructor(dtype=np.float64) @@ -1803,7 +1803,9 @@ def _transform_general(self, func, engine, engine_kwargs, *args, **kwargs): applied.append(res) concat_index = obj.columns - concatenated = concat(applied, axis=0, verify_integrity=False) + concatenated = concat( + applied, axis=0, verify_integrity=False, ignore_index=True + ) concatenated = concatenated.reindex(concat_index, axis=1) return self._set_result_index_ordered(concatenated) @@ -2797,7 +2799,7 @@ def _wrap_transform_general_frame( # other dimension; this will preserve dtypes # GH14457 if res.index.is_(obj.index): - res_frame = concat([res] * len(group.columns), axis=1) + res_frame = concat([res] * len(group.columns), axis=1, ignore_index=True) res_frame.columns = group.columns res_frame.index = group.index else: diff --git a/pandas/core/methods/describe.py b/pandas/core/methods/describe.py index 380bf9ce55659..ef20d4c509732 100644 --- a/pandas/core/methods/describe.py +++ b/pandas/core/methods/describe.py @@ -175,6 +175,7 @@ def describe(self, 
percentiles: Sequence[float] | np.ndarray) -> DataFrame: d = concat( [x.reindex(col_names) for x in ldesc], axis=1, + ignore_index=True, sort=False, ) d.columns = data.columns.copy() diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 24a070a536150..f51a833e5f906 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -243,7 +243,7 @@ def melt( not isinstance(dt, np.dtype) and dt._supports_2d for dt in frame.dtypes ): mdata[value_name] = concat( - [frame.iloc[:, i] for i in range(frame.shape[1])] + [frame.iloc[:, i] for i in range(frame.shape[1])], ignore_index=True ).values else: mdata[value_name] = frame._values.ravel("F") diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 7b2fbb54f7d35..b62f550662f5d 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -835,7 +835,7 @@ def _normalize( elif normalize == "index": index_margin = index_margin / index_margin.sum() - table = table._append(index_margin) + table = table._append(index_margin, ignore_index=True) table = table.fillna(0) table.index = table_index @@ -844,7 +844,7 @@ def _normalize( index_margin = index_margin / index_margin.sum() index_margin.loc[margins_name] = 1 table = concat([table, column_margin], axis=1) - table = table._append(index_margin) + table = table._append(index_margin, ignore_index=True) table = table.fillna(0) table.index = table_index diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index ff358e8ba346c..afb0c489c9c94 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -953,7 +953,7 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame: result: Series | DataFrame if len(buf) > 0 and not frame.empty: - result = concat(buf) + result = concat(buf, ignore_index=True) ratio = len(result) // len(frame) else: # input is empty
For internal `concat` usages where the result index is set later, setting `ignore_index=True` to avoid More Work calculating the result concat index
https://api.github.com/repos/pandas-dev/pandas/pulls/57913
2024-03-19T17:38:04Z
2024-03-22T20:48:52Z
2024-03-22T20:48:52Z
2024-03-22T20:52:43Z
Backport PR #57886 on branch 2.2.x (CI: Remove ASAN job)
diff --git a/.github/actions/run-tests/action.yml b/.github/actions/run-tests/action.yml index b4778b74df335..fd7c3587f2254 100644 --- a/.github/actions/run-tests/action.yml +++ b/.github/actions/run-tests/action.yml @@ -1,16 +1,9 @@ name: Run tests and report results -inputs: - preload: - description: Preload arguments for sanitizer - required: false - asan_options: - description: Arguments for Address Sanitizer (ASAN) - required: false runs: using: composite steps: - name: Test - run: ${{ inputs.asan_options }} ${{ inputs.preload }} ci/run_tests.sh + run: ci/run_tests.sh shell: bash -el {0} - name: Publish test results diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 8736674bbf965..bacc3d874a60d 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -96,14 +96,6 @@ jobs: - name: "Pyarrow Nightly" env_file: actions-311-pyarrownightly.yaml pattern: "not slow and not network and not single_cpu" - - name: "ASAN / UBSAN" - env_file: actions-311-sanitizers.yaml - pattern: "not slow and not network and not single_cpu and not skip_ubsan" - asan_options: "ASAN_OPTIONS=detect_leaks=0" - preload: LD_PRELOAD=$(gcc -print-file-name=libasan.so) - meson_args: --config-settings=setup-args="-Db_sanitize=address,undefined" - cflags_adds: -fno-sanitize-recover=all - pytest_workers: -1 # disable pytest-xdist as it swallows stderr from ASAN fail-fast: false name: ${{ matrix.name || format('ubuntu-latest {0}', matrix.env_file) }} env: @@ -190,18 +182,12 @@ jobs: - name: Test (not single_cpu) uses: ./.github/actions/run-tests if: ${{ matrix.name != 'Pypy' }} - with: - preload: ${{ matrix.preload }} - asan_options: ${{ matrix.asan_options }} env: # Set pattern to not single_cpu if not already set PATTERN: ${{ env.PATTERN == '' && 'not single_cpu' || matrix.pattern }} - name: Test (single_cpu) uses: ./.github/actions/run-tests - with: - preload: ${{ matrix.preload }} - asan_options: ${{ matrix.asan_options }} env: 
PATTERN: 'single_cpu' PYTEST_WORKERS: 0 diff --git a/ci/deps/actions-311-sanitizers.yaml b/ci/deps/actions-311-sanitizers.yaml deleted file mode 100644 index f5f04c90bffad..0000000000000 --- a/ci/deps/actions-311-sanitizers.yaml +++ /dev/null @@ -1,32 +0,0 @@ -name: pandas-dev -channels: - - conda-forge -dependencies: - - python=3.11 - - # build dependencies - - versioneer[toml] - - cython>=0.29.33 - - meson[ninja]=1.2.1 - - meson-python=0.13.1 - - # test dependencies - - pytest>=7.3.2 - - pytest-cov - - pytest-xdist>=2.2.0 - - pytest-localserver>=0.7.1 - - pytest-qt>=4.2.0 - - boto3 - - hypothesis>=6.46.1 - - pyqt>=5.15.9 - - # required dependencies - - python-dateutil - - numpy - - pytz - - # pandas dependencies - - pip - - - pip: - - "tzdata>=2022.7"
Backport PR #57886: CI: Remove ASAN job
https://api.github.com/repos/pandas-dev/pandas/pulls/57910
2024-03-19T05:32:07Z
2024-03-19T16:29:17Z
2024-03-19T16:29:17Z
2024-03-19T16:29:17Z
CI: Improve API of --ignore_errors in validate_docstrings.py
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 3c46cb39eeb7e..a9967dcb8efe6 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -68,1212 +68,1212 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then MSG='Validate Docstrings' ; echo $MSG $BASE_DIR/scripts/validate_docstrings.py \ --format=actions \ - -i '*' ES01 `# For now it is ok if docstrings are missing the extended summary` \ - -i pandas.Series.dt PR01 `# Accessors are implemented as classes, but we do not document the Parameters section` \ - -i pandas.Categorical.__array__ SA01\ - -i pandas.Categorical.codes SA01\ - -i pandas.Categorical.dtype SA01\ - -i pandas.Categorical.from_codes SA01\ - -i pandas.Categorical.ordered SA01\ - -i pandas.CategoricalDtype.categories SA01\ - -i pandas.CategoricalDtype.ordered SA01\ - -i pandas.CategoricalIndex.codes SA01\ - -i pandas.CategoricalIndex.ordered SA01\ - -i pandas.DataFrame.__dataframe__ SA01\ - -i pandas.DataFrame.__iter__ SA01\ - -i pandas.DataFrame.assign SA01\ - -i pandas.DataFrame.at_time PR01\ - -i pandas.DataFrame.axes SA01\ - -i pandas.DataFrame.backfill PR01,SA01\ - -i pandas.DataFrame.bfill SA01\ - -i pandas.DataFrame.columns SA01\ - -i pandas.DataFrame.copy SA01\ - -i pandas.DataFrame.droplevel SA01\ - -i pandas.DataFrame.dtypes SA01\ - -i pandas.DataFrame.ffill SA01\ - -i pandas.DataFrame.first_valid_index SA01\ - -i pandas.DataFrame.get SA01\ - -i pandas.DataFrame.hist RT03\ - -i pandas.DataFrame.infer_objects RT03\ - -i pandas.DataFrame.keys SA01\ - -i pandas.DataFrame.kurt RT03,SA01\ - -i pandas.DataFrame.kurtosis RT03,SA01\ - -i pandas.DataFrame.last_valid_index SA01\ - -i pandas.DataFrame.mask RT03\ - -i pandas.DataFrame.max RT03\ - -i pandas.DataFrame.mean RT03,SA01\ - -i pandas.DataFrame.median RT03,SA01\ - -i pandas.DataFrame.min RT03\ - -i pandas.DataFrame.pad PR01,SA01\ - -i pandas.DataFrame.plot PR02,SA01\ - -i pandas.DataFrame.pop SA01\ - -i pandas.DataFrame.prod RT03\ - -i pandas.DataFrame.product RT03\ - -i 
pandas.DataFrame.reorder_levels SA01\ - -i pandas.DataFrame.sem PR01,RT03,SA01\ - -i pandas.DataFrame.skew RT03,SA01\ - -i pandas.DataFrame.sparse PR01,SA01\ - -i pandas.DataFrame.sparse.density SA01\ - -i pandas.DataFrame.sparse.from_spmatrix SA01\ - -i pandas.DataFrame.sparse.to_coo SA01\ - -i pandas.DataFrame.sparse.to_dense SA01\ - -i pandas.DataFrame.std PR01,RT03,SA01\ - -i pandas.DataFrame.sum RT03\ - -i pandas.DataFrame.swapaxes PR01,SA01\ - -i pandas.DataFrame.swaplevel SA01\ - -i pandas.DataFrame.to_feather SA01\ - -i pandas.DataFrame.to_markdown SA01\ - -i pandas.DataFrame.to_parquet RT03\ - -i pandas.DataFrame.to_period SA01\ - -i pandas.DataFrame.to_timestamp SA01\ - -i pandas.DataFrame.tz_convert SA01\ - -i pandas.DataFrame.tz_localize SA01\ - -i pandas.DataFrame.unstack RT03\ - -i pandas.DataFrame.value_counts RT03\ - -i pandas.DataFrame.var PR01,RT03,SA01\ - -i pandas.DataFrame.where RT03\ - -i pandas.DatetimeIndex.ceil SA01\ - -i pandas.DatetimeIndex.date SA01\ - -i pandas.DatetimeIndex.day SA01\ - -i pandas.DatetimeIndex.day_name SA01\ - -i pandas.DatetimeIndex.day_of_year SA01\ - -i pandas.DatetimeIndex.dayofyear SA01\ - -i pandas.DatetimeIndex.floor SA01\ - -i pandas.DatetimeIndex.freqstr SA01\ - -i pandas.DatetimeIndex.hour SA01\ - -i pandas.DatetimeIndex.indexer_at_time PR01,RT03\ - -i pandas.DatetimeIndex.indexer_between_time RT03\ - -i pandas.DatetimeIndex.inferred_freq SA01\ - -i pandas.DatetimeIndex.is_leap_year SA01\ - -i pandas.DatetimeIndex.microsecond SA01\ - -i pandas.DatetimeIndex.minute SA01\ - -i pandas.DatetimeIndex.month SA01\ - -i pandas.DatetimeIndex.month_name SA01\ - -i pandas.DatetimeIndex.nanosecond SA01\ - -i pandas.DatetimeIndex.quarter SA01\ - -i pandas.DatetimeIndex.round SA01\ - -i pandas.DatetimeIndex.second SA01\ - -i pandas.DatetimeIndex.snap PR01,RT03,SA01\ - -i pandas.DatetimeIndex.std PR01,RT03\ - -i pandas.DatetimeIndex.time SA01\ - -i pandas.DatetimeIndex.timetz SA01\ - -i pandas.DatetimeIndex.to_period RT03\ - 
-i pandas.DatetimeIndex.to_pydatetime RT03,SA01\ - -i pandas.DatetimeIndex.tz SA01\ - -i pandas.DatetimeIndex.tz_convert RT03\ - -i pandas.DatetimeIndex.year SA01\ - -i pandas.DatetimeTZDtype SA01\ - -i pandas.DatetimeTZDtype.tz SA01\ - -i pandas.DatetimeTZDtype.unit SA01\ - -i pandas.ExcelFile PR01,SA01\ - -i pandas.ExcelFile.parse PR01,SA01\ - -i pandas.ExcelWriter SA01\ - -i pandas.Float32Dtype SA01\ - -i pandas.Float64Dtype SA01\ - -i pandas.Grouper PR02,SA01\ - -i pandas.HDFStore.append PR01,SA01\ - -i pandas.HDFStore.get SA01\ - -i pandas.HDFStore.groups SA01\ - -i pandas.HDFStore.info RT03,SA01\ - -i pandas.HDFStore.keys SA01\ - -i pandas.HDFStore.put PR01,SA01\ - -i pandas.HDFStore.select SA01\ - -i pandas.HDFStore.walk SA01\ - -i pandas.Index PR07\ - -i pandas.Index.T SA01\ - -i pandas.Index.append PR07,RT03,SA01\ - -i pandas.Index.astype SA01\ - -i pandas.Index.copy PR07,SA01\ - -i pandas.Index.difference PR07,RT03,SA01\ - -i pandas.Index.drop PR07,SA01\ - -i pandas.Index.drop_duplicates RT03\ - -i pandas.Index.droplevel RT03,SA01\ - -i pandas.Index.dropna RT03,SA01\ - -i pandas.Index.dtype SA01\ - -i pandas.Index.duplicated RT03\ - -i pandas.Index.empty GL08\ - -i pandas.Index.equals SA01\ - -i pandas.Index.fillna RT03\ - -i pandas.Index.get_indexer PR07,SA01\ - -i pandas.Index.get_indexer_for PR01,SA01\ - -i pandas.Index.get_indexer_non_unique PR07,SA01\ - -i pandas.Index.get_loc PR07,RT03,SA01\ - -i pandas.Index.get_slice_bound PR07\ - -i pandas.Index.hasnans SA01\ - -i pandas.Index.identical PR01,SA01\ - -i pandas.Index.inferred_type SA01\ - -i pandas.Index.insert PR07,RT03,SA01\ - -i pandas.Index.intersection PR07,RT03,SA01\ - -i pandas.Index.item SA01\ - -i pandas.Index.join PR07,RT03,SA01\ - -i pandas.Index.map SA01\ - -i pandas.Index.memory_usage RT03\ - -i pandas.Index.name SA01\ - -i pandas.Index.names GL08\ - -i pandas.Index.nbytes SA01\ - -i pandas.Index.ndim SA01\ - -i pandas.Index.nunique RT03\ - -i pandas.Index.putmask PR01,RT03\ - -i 
pandas.Index.ravel PR01,RT03\ - -i pandas.Index.reindex PR07\ - -i pandas.Index.shape SA01\ - -i pandas.Index.size SA01\ - -i pandas.Index.slice_indexer PR07,RT03,SA01\ - -i pandas.Index.slice_locs RT03\ - -i pandas.Index.str PR01,SA01\ - -i pandas.Index.symmetric_difference PR07,RT03,SA01\ - -i pandas.Index.take PR01,PR07\ - -i pandas.Index.to_list RT03\ - -i pandas.Index.union PR07,RT03,SA01\ - -i pandas.Index.unique RT03\ - -i pandas.Index.value_counts RT03\ - -i pandas.Index.view GL08\ - -i pandas.Int16Dtype SA01\ - -i pandas.Int32Dtype SA01\ - -i pandas.Int64Dtype SA01\ - -i pandas.Int8Dtype SA01\ - -i pandas.Interval PR02\ - -i pandas.Interval.closed SA01\ - -i pandas.Interval.left SA01\ - -i pandas.Interval.mid SA01\ - -i pandas.Interval.right SA01\ - -i pandas.IntervalDtype PR01,SA01\ - -i pandas.IntervalDtype.subtype SA01\ - -i pandas.IntervalIndex.closed SA01\ - -i pandas.IntervalIndex.contains RT03\ - -i pandas.IntervalIndex.get_indexer PR07,SA01\ - -i pandas.IntervalIndex.get_loc PR07,RT03,SA01\ - -i pandas.IntervalIndex.is_non_overlapping_monotonic SA01\ - -i pandas.IntervalIndex.left GL08\ - -i pandas.IntervalIndex.length GL08\ - -i pandas.IntervalIndex.mid GL08\ - -i pandas.IntervalIndex.right GL08\ - -i pandas.IntervalIndex.set_closed RT03,SA01\ - -i pandas.IntervalIndex.to_tuples RT03,SA01\ - -i pandas.MultiIndex PR01\ - -i pandas.MultiIndex.append PR07,SA01\ - -i pandas.MultiIndex.copy PR07,RT03,SA01\ - -i pandas.MultiIndex.drop PR07,RT03,SA01\ - -i pandas.MultiIndex.droplevel RT03,SA01\ - -i pandas.MultiIndex.dtypes SA01\ - -i pandas.MultiIndex.get_indexer PR07,SA01\ - -i pandas.MultiIndex.get_level_values SA01\ - -i pandas.MultiIndex.get_loc PR07\ - -i pandas.MultiIndex.get_loc_level PR07\ - -i pandas.MultiIndex.levels SA01\ - -i pandas.MultiIndex.levshape SA01\ - -i pandas.MultiIndex.names SA01\ - -i pandas.MultiIndex.nlevels SA01\ - -i pandas.MultiIndex.remove_unused_levels RT03,SA01\ - -i pandas.MultiIndex.reorder_levels RT03,SA01\ - -i 
pandas.MultiIndex.set_codes SA01\ - -i pandas.MultiIndex.set_levels RT03,SA01\ - -i pandas.MultiIndex.sortlevel PR07,SA01\ - -i pandas.MultiIndex.to_frame RT03\ - -i pandas.MultiIndex.truncate SA01\ - -i pandas.NA SA01\ - -i pandas.NaT SA01\ - -i pandas.NamedAgg SA01\ - -i pandas.Period SA01\ - -i pandas.Period.asfreq SA01\ - -i pandas.Period.freq GL08\ - -i pandas.Period.freqstr SA01\ - -i pandas.Period.is_leap_year SA01\ - -i pandas.Period.month SA01\ - -i pandas.Period.now SA01\ - -i pandas.Period.ordinal GL08\ - -i pandas.Period.quarter SA01\ - -i pandas.Period.strftime PR01,SA01\ - -i pandas.Period.to_timestamp SA01\ - -i pandas.Period.year SA01\ - -i pandas.PeriodDtype SA01\ - -i pandas.PeriodDtype.freq SA01\ - -i pandas.PeriodIndex.day SA01\ - -i pandas.PeriodIndex.day_of_week SA01\ - -i pandas.PeriodIndex.day_of_year SA01\ - -i pandas.PeriodIndex.dayofweek SA01\ - -i pandas.PeriodIndex.dayofyear SA01\ - -i pandas.PeriodIndex.days_in_month SA01\ - -i pandas.PeriodIndex.daysinmonth SA01\ - -i pandas.PeriodIndex.freqstr SA01\ - -i pandas.PeriodIndex.from_fields PR07,SA01\ - -i pandas.PeriodIndex.from_ordinals SA01\ - -i pandas.PeriodIndex.hour SA01\ - -i pandas.PeriodIndex.is_leap_year SA01\ - -i pandas.PeriodIndex.minute SA01\ - -i pandas.PeriodIndex.month SA01\ - -i pandas.PeriodIndex.quarter SA01\ - -i pandas.PeriodIndex.qyear GL08\ - -i pandas.PeriodIndex.second SA01\ - -i pandas.PeriodIndex.to_timestamp RT03,SA01\ - -i pandas.PeriodIndex.week SA01\ - -i pandas.PeriodIndex.weekday SA01\ - -i pandas.PeriodIndex.weekofyear SA01\ - -i pandas.PeriodIndex.year SA01\ - -i pandas.RangeIndex PR07\ - -i pandas.RangeIndex.from_range PR01,SA01\ - -i pandas.RangeIndex.start SA01\ - -i pandas.RangeIndex.step SA01\ - -i pandas.RangeIndex.stop SA01\ - -i pandas.Series SA01\ - -i pandas.Series.T SA01\ - -i pandas.Series.__iter__ RT03,SA01\ - -i pandas.Series.add PR07\ - -i pandas.Series.at_time PR01\ - -i pandas.Series.backfill PR01,SA01\ - -i pandas.Series.bfill SA01\ - 
-i pandas.Series.case_when RT03\ - -i pandas.Series.cat PR07,SA01\ - -i pandas.Series.cat.add_categories PR01,PR02\ - -i pandas.Series.cat.as_ordered PR01\ - -i pandas.Series.cat.as_unordered PR01\ - -i pandas.Series.cat.codes SA01\ - -i pandas.Series.cat.ordered SA01\ - -i pandas.Series.cat.remove_categories PR01,PR02\ - -i pandas.Series.cat.remove_unused_categories PR01\ - -i pandas.Series.cat.rename_categories PR01,PR02\ - -i pandas.Series.cat.reorder_categories PR01,PR02\ - -i pandas.Series.cat.set_categories PR01,PR02\ - -i pandas.Series.copy SA01\ - -i pandas.Series.div PR07\ - -i pandas.Series.droplevel SA01\ - -i pandas.Series.dt.as_unit PR01,PR02\ - -i pandas.Series.dt.ceil PR01,PR02,SA01\ - -i pandas.Series.dt.components SA01\ - -i pandas.Series.dt.date SA01\ - -i pandas.Series.dt.day SA01\ - -i pandas.Series.dt.day_name PR01,PR02,SA01\ - -i pandas.Series.dt.day_of_year SA01\ - -i pandas.Series.dt.dayofyear SA01\ - -i pandas.Series.dt.days SA01\ - -i pandas.Series.dt.days_in_month SA01\ - -i pandas.Series.dt.daysinmonth SA01\ - -i pandas.Series.dt.floor PR01,PR02,SA01\ - -i pandas.Series.dt.freq GL08\ - -i pandas.Series.dt.hour SA01\ - -i pandas.Series.dt.is_leap_year SA01\ - -i pandas.Series.dt.microsecond SA01\ - -i pandas.Series.dt.microseconds SA01\ - -i pandas.Series.dt.minute SA01\ - -i pandas.Series.dt.month SA01\ - -i pandas.Series.dt.month_name PR01,PR02,SA01\ - -i pandas.Series.dt.nanosecond SA01\ - -i pandas.Series.dt.nanoseconds SA01\ - -i pandas.Series.dt.normalize PR01\ - -i pandas.Series.dt.quarter SA01\ - -i pandas.Series.dt.qyear GL08\ - -i pandas.Series.dt.round PR01,PR02,SA01\ - -i pandas.Series.dt.second SA01\ - -i pandas.Series.dt.seconds SA01\ - -i pandas.Series.dt.strftime PR01,PR02\ - -i pandas.Series.dt.time SA01\ - -i pandas.Series.dt.timetz SA01\ - -i pandas.Series.dt.to_period PR01,PR02,RT03\ - -i pandas.Series.dt.total_seconds PR01\ - -i pandas.Series.dt.tz SA01\ - -i pandas.Series.dt.tz_convert PR01,PR02,RT03\ - -i 
pandas.Series.dt.tz_localize PR01,PR02\ - -i pandas.Series.dt.unit GL08\ - -i pandas.Series.dt.year SA01\ - -i pandas.Series.dtype SA01\ - -i pandas.Series.dtypes SA01\ - -i pandas.Series.empty GL08\ - -i pandas.Series.eq PR07,SA01\ - -i pandas.Series.ffill SA01\ - -i pandas.Series.first_valid_index SA01\ - -i pandas.Series.floordiv PR07\ - -i pandas.Series.ge PR07,SA01\ - -i pandas.Series.get SA01\ - -i pandas.Series.gt PR07,SA01\ - -i pandas.Series.hasnans SA01\ - -i pandas.Series.infer_objects RT03\ - -i pandas.Series.is_monotonic_decreasing SA01\ - -i pandas.Series.is_monotonic_increasing SA01\ - -i pandas.Series.is_unique SA01\ - -i pandas.Series.item SA01\ - -i pandas.Series.keys SA01\ - -i pandas.Series.kurt RT03,SA01\ - -i pandas.Series.kurtosis RT03,SA01\ - -i pandas.Series.last_valid_index SA01\ - -i pandas.Series.le PR07,SA01\ - -i pandas.Series.list.__getitem__ SA01\ - -i pandas.Series.list.flatten SA01\ - -i pandas.Series.list.len SA01\ - -i pandas.Series.lt PR07,SA01\ - -i pandas.Series.mask RT03\ - -i pandas.Series.max RT03\ - -i pandas.Series.mean RT03,SA01\ - -i pandas.Series.median RT03,SA01\ - -i pandas.Series.min RT03\ - -i pandas.Series.mod PR07\ - -i pandas.Series.mode SA01\ - -i pandas.Series.mul PR07\ - -i pandas.Series.nbytes SA01\ - -i pandas.Series.ndim SA01\ - -i pandas.Series.ne PR07,SA01\ - -i pandas.Series.nunique RT03\ - -i pandas.Series.pad PR01,SA01\ - -i pandas.Series.plot PR02,SA01\ - -i pandas.Series.pop RT03,SA01\ - -i pandas.Series.pow PR07\ - -i pandas.Series.prod RT03\ - -i pandas.Series.product RT03\ - -i pandas.Series.radd PR07\ - -i pandas.Series.rdiv PR07\ - -i pandas.Series.reorder_levels RT03,SA01\ - -i pandas.Series.rfloordiv PR07\ - -i pandas.Series.rmod PR07\ - -i pandas.Series.rmul PR07\ - -i pandas.Series.rpow PR07\ - -i pandas.Series.rsub PR07\ - -i pandas.Series.rtruediv PR07\ - -i pandas.Series.sem PR01,RT03,SA01\ - -i pandas.Series.shape SA01\ - -i pandas.Series.size SA01\ - -i pandas.Series.skew RT03,SA01\ - 
-i pandas.Series.sparse PR01,SA01\ - -i pandas.Series.sparse.density SA01\ - -i pandas.Series.sparse.fill_value SA01\ - -i pandas.Series.sparse.from_coo PR07,SA01\ - -i pandas.Series.sparse.npoints SA01\ - -i pandas.Series.sparse.sp_values SA01\ - -i pandas.Series.sparse.to_coo PR07,RT03,SA01\ - -i pandas.Series.std PR01,RT03,SA01\ - -i pandas.Series.str PR01,SA01\ - -i pandas.Series.str.capitalize RT03\ - -i pandas.Series.str.casefold RT03\ - -i pandas.Series.str.center RT03,SA01\ - -i pandas.Series.str.decode PR07,RT03,SA01\ - -i pandas.Series.str.encode PR07,RT03,SA01\ - -i pandas.Series.str.find RT03\ - -i pandas.Series.str.fullmatch RT03\ - -i pandas.Series.str.get RT03,SA01\ - -i pandas.Series.str.index RT03\ - -i pandas.Series.str.ljust RT03,SA01\ - -i pandas.Series.str.lower RT03\ - -i pandas.Series.str.lstrip RT03\ - -i pandas.Series.str.match RT03\ - -i pandas.Series.str.normalize RT03,SA01\ - -i pandas.Series.str.partition RT03\ - -i pandas.Series.str.repeat SA01\ - -i pandas.Series.str.replace SA01\ - -i pandas.Series.str.rfind RT03\ - -i pandas.Series.str.rindex RT03\ - -i pandas.Series.str.rjust RT03,SA01\ - -i pandas.Series.str.rpartition RT03\ - -i pandas.Series.str.rstrip RT03\ - -i pandas.Series.str.strip RT03\ - -i pandas.Series.str.swapcase RT03\ - -i pandas.Series.str.title RT03\ - -i pandas.Series.str.translate RT03,SA01\ - -i pandas.Series.str.upper RT03\ - -i pandas.Series.str.wrap RT03,SA01\ - -i pandas.Series.str.zfill RT03\ - -i pandas.Series.struct.dtypes SA01\ - -i pandas.Series.sub PR07\ - -i pandas.Series.sum RT03\ - -i pandas.Series.swaplevel SA01\ - -i pandas.Series.to_dict SA01\ - -i pandas.Series.to_frame SA01\ - -i pandas.Series.to_list RT03\ - -i pandas.Series.to_markdown SA01\ - -i pandas.Series.to_period SA01\ - -i pandas.Series.to_string SA01\ - -i pandas.Series.to_timestamp RT03,SA01\ - -i pandas.Series.truediv PR07\ - -i pandas.Series.tz_convert SA01\ - -i pandas.Series.tz_localize SA01\ - -i pandas.Series.unstack SA01\ - 
-i pandas.Series.update PR07,SA01\ - -i pandas.Series.value_counts RT03\ - -i pandas.Series.var PR01,RT03,SA01\ - -i pandas.Series.where RT03\ - -i pandas.SparseDtype SA01\ - -i pandas.Timedelta PR07,SA01\ - -i pandas.Timedelta.as_unit SA01\ - -i pandas.Timedelta.asm8 SA01\ - -i pandas.Timedelta.ceil SA01\ - -i pandas.Timedelta.components SA01\ - -i pandas.Timedelta.days SA01\ - -i pandas.Timedelta.floor SA01\ - -i pandas.Timedelta.max PR02,PR07,SA01\ - -i pandas.Timedelta.min PR02,PR07,SA01\ - -i pandas.Timedelta.resolution PR02,PR07,SA01\ - -i pandas.Timedelta.round SA01\ - -i pandas.Timedelta.to_numpy PR01\ - -i pandas.Timedelta.to_timedelta64 SA01\ - -i pandas.Timedelta.total_seconds SA01\ - -i pandas.Timedelta.view SA01\ - -i pandas.TimedeltaIndex PR01\ - -i pandas.TimedeltaIndex.as_unit RT03,SA01\ - -i pandas.TimedeltaIndex.ceil SA01\ - -i pandas.TimedeltaIndex.components SA01\ - -i pandas.TimedeltaIndex.days SA01\ - -i pandas.TimedeltaIndex.floor SA01\ - -i pandas.TimedeltaIndex.inferred_freq SA01\ - -i pandas.TimedeltaIndex.microseconds SA01\ - -i pandas.TimedeltaIndex.nanoseconds SA01\ - -i pandas.TimedeltaIndex.round SA01\ - -i pandas.TimedeltaIndex.seconds SA01\ - -i pandas.TimedeltaIndex.to_pytimedelta RT03,SA01\ - -i pandas.Timestamp PR07,SA01\ - -i pandas.Timestamp.as_unit SA01\ - -i pandas.Timestamp.asm8 SA01\ - -i pandas.Timestamp.astimezone SA01\ - -i pandas.Timestamp.ceil SA01\ - -i pandas.Timestamp.combine PR01,SA01\ - -i pandas.Timestamp.ctime SA01\ - -i pandas.Timestamp.date SA01\ - -i pandas.Timestamp.day GL08\ - -i pandas.Timestamp.day_name SA01\ - -i pandas.Timestamp.day_of_week SA01\ - -i pandas.Timestamp.day_of_year SA01\ - -i pandas.Timestamp.dayofweek SA01\ - -i pandas.Timestamp.dayofyear SA01\ - -i pandas.Timestamp.days_in_month SA01\ - -i pandas.Timestamp.daysinmonth SA01\ - -i pandas.Timestamp.dst SA01\ - -i pandas.Timestamp.floor SA01\ - -i pandas.Timestamp.fold GL08\ - -i pandas.Timestamp.fromordinal SA01\ - -i 
pandas.Timestamp.fromtimestamp PR01,SA01\ - -i pandas.Timestamp.hour GL08\ - -i pandas.Timestamp.is_leap_year SA01\ - -i pandas.Timestamp.isocalendar SA01\ - -i pandas.Timestamp.isoformat SA01\ - -i pandas.Timestamp.isoweekday SA01\ - -i pandas.Timestamp.max PR02,PR07,SA01\ - -i pandas.Timestamp.microsecond GL08\ - -i pandas.Timestamp.min PR02,PR07,SA01\ - -i pandas.Timestamp.minute GL08\ - -i pandas.Timestamp.month GL08\ - -i pandas.Timestamp.month_name SA01\ - -i pandas.Timestamp.nanosecond GL08\ - -i pandas.Timestamp.normalize SA01\ - -i pandas.Timestamp.now SA01\ - -i pandas.Timestamp.quarter SA01\ - -i pandas.Timestamp.replace PR07,SA01\ - -i pandas.Timestamp.resolution PR02,PR07,SA01\ - -i pandas.Timestamp.round SA01\ - -i pandas.Timestamp.second GL08\ - -i pandas.Timestamp.strftime SA01\ - -i pandas.Timestamp.strptime PR01,SA01\ - -i pandas.Timestamp.time SA01\ - -i pandas.Timestamp.timestamp SA01\ - -i pandas.Timestamp.timetuple SA01\ - -i pandas.Timestamp.timetz SA01\ - -i pandas.Timestamp.to_datetime64 SA01\ - -i pandas.Timestamp.to_julian_date SA01\ - -i pandas.Timestamp.to_numpy PR01\ - -i pandas.Timestamp.to_period PR01,SA01\ - -i pandas.Timestamp.to_pydatetime PR01,SA01\ - -i pandas.Timestamp.today SA01\ - -i pandas.Timestamp.toordinal SA01\ - -i pandas.Timestamp.tz SA01\ - -i pandas.Timestamp.tz_convert SA01\ - -i pandas.Timestamp.tz_localize SA01\ - -i pandas.Timestamp.tzinfo GL08\ - -i pandas.Timestamp.tzname SA01\ - -i pandas.Timestamp.unit SA01\ - -i pandas.Timestamp.utcfromtimestamp PR01,SA01\ - -i pandas.Timestamp.utcnow SA01\ - -i pandas.Timestamp.utcoffset SA01\ - -i pandas.Timestamp.utctimetuple SA01\ - -i pandas.Timestamp.value GL08\ - -i pandas.Timestamp.week SA01\ - -i pandas.Timestamp.weekday SA01\ - -i pandas.Timestamp.weekofyear SA01\ - -i pandas.Timestamp.year GL08\ - -i pandas.UInt16Dtype SA01\ - -i pandas.UInt32Dtype SA01\ - -i pandas.UInt64Dtype SA01\ - -i pandas.UInt8Dtype SA01\ - -i pandas.api.extensions.ExtensionArray SA01\ - -i 
pandas.api.extensions.ExtensionArray._accumulate RT03,SA01\ - -i pandas.api.extensions.ExtensionArray._concat_same_type PR07,SA01\ - -i pandas.api.extensions.ExtensionArray._formatter SA01\ - -i pandas.api.extensions.ExtensionArray._from_sequence SA01\ - -i pandas.api.extensions.ExtensionArray._from_sequence_of_strings SA01\ - -i pandas.api.extensions.ExtensionArray._hash_pandas_object RT03,SA01\ - -i pandas.api.extensions.ExtensionArray._pad_or_backfill PR01,RT03,SA01\ - -i pandas.api.extensions.ExtensionArray._reduce RT03,SA01\ - -i pandas.api.extensions.ExtensionArray._values_for_factorize SA01\ - -i pandas.api.extensions.ExtensionArray.astype SA01\ - -i pandas.api.extensions.ExtensionArray.copy RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.dropna RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.dtype SA01\ - -i pandas.api.extensions.ExtensionArray.duplicated RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.equals SA01\ - -i pandas.api.extensions.ExtensionArray.fillna SA01\ - -i pandas.api.extensions.ExtensionArray.insert PR07,RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.interpolate PR01,SA01\ - -i pandas.api.extensions.ExtensionArray.isin PR07,RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.isna SA01\ - -i pandas.api.extensions.ExtensionArray.nbytes SA01\ - -i pandas.api.extensions.ExtensionArray.ndim SA01\ - -i pandas.api.extensions.ExtensionArray.ravel RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.shape SA01\ - -i pandas.api.extensions.ExtensionArray.shift SA01\ - -i pandas.api.extensions.ExtensionArray.take RT03\ - -i pandas.api.extensions.ExtensionArray.tolist RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.unique RT03,SA01\ - -i pandas.api.extensions.ExtensionArray.view SA01\ - -i pandas.api.extensions.register_extension_dtype SA01\ - -i pandas.api.indexers.BaseIndexer PR01,SA01\ - -i pandas.api.indexers.FixedForwardWindowIndexer PR01,SA01\ - -i pandas.api.indexers.VariableOffsetWindowIndexer PR01,SA01\ - -i 
pandas.api.interchange.from_dataframe RT03,SA01\ - -i pandas.api.types.infer_dtype PR07,SA01\ - -i pandas.api.types.is_any_real_numeric_dtype SA01\ - -i pandas.api.types.is_bool PR01,SA01\ - -i pandas.api.types.is_bool_dtype SA01\ - -i pandas.api.types.is_categorical_dtype SA01\ - -i pandas.api.types.is_complex PR01,SA01\ - -i pandas.api.types.is_complex_dtype SA01\ - -i pandas.api.types.is_datetime64_any_dtype SA01\ - -i pandas.api.types.is_datetime64_dtype SA01\ - -i pandas.api.types.is_datetime64_ns_dtype SA01\ - -i pandas.api.types.is_datetime64tz_dtype SA01\ - -i pandas.api.types.is_dict_like PR07,SA01\ - -i pandas.api.types.is_extension_array_dtype SA01\ - -i pandas.api.types.is_file_like PR07,SA01\ - -i pandas.api.types.is_float PR01,SA01\ - -i pandas.api.types.is_float_dtype SA01\ - -i pandas.api.types.is_hashable PR01,RT03,SA01\ - -i pandas.api.types.is_int64_dtype SA01\ - -i pandas.api.types.is_integer PR01,SA01\ - -i pandas.api.types.is_integer_dtype SA01\ - -i pandas.api.types.is_interval_dtype SA01\ - -i pandas.api.types.is_iterator PR07,SA01\ - -i pandas.api.types.is_list_like SA01\ - -i pandas.api.types.is_named_tuple PR07,SA01\ - -i pandas.api.types.is_numeric_dtype SA01\ - -i pandas.api.types.is_object_dtype SA01\ - -i pandas.api.types.is_period_dtype SA01\ - -i pandas.api.types.is_re PR07,SA01\ - -i pandas.api.types.is_re_compilable PR07,SA01\ - -i pandas.api.types.is_scalar SA01\ - -i pandas.api.types.is_signed_integer_dtype SA01\ - -i pandas.api.types.is_sparse SA01\ - -i pandas.api.types.is_string_dtype SA01\ - -i pandas.api.types.is_timedelta64_dtype SA01\ - -i pandas.api.types.is_timedelta64_ns_dtype SA01\ - -i pandas.api.types.is_unsigned_integer_dtype SA01\ - -i pandas.api.types.pandas_dtype PR07,RT03,SA01\ - -i pandas.api.types.union_categoricals RT03,SA01\ - -i pandas.arrays.ArrowExtensionArray PR07,SA01\ - -i pandas.arrays.BooleanArray SA01\ - -i pandas.arrays.DatetimeArray SA01\ - -i pandas.arrays.FloatingArray SA01\ - -i 
pandas.arrays.IntegerArray SA01\ - -i pandas.arrays.IntervalArray.closed SA01\ - -i pandas.arrays.IntervalArray.contains RT03\ - -i pandas.arrays.IntervalArray.is_non_overlapping_monotonic SA01\ - -i pandas.arrays.IntervalArray.left SA01\ - -i pandas.arrays.IntervalArray.length SA01\ - -i pandas.arrays.IntervalArray.mid SA01\ - -i pandas.arrays.IntervalArray.right SA01\ - -i pandas.arrays.IntervalArray.set_closed RT03,SA01\ - -i pandas.arrays.IntervalArray.to_tuples RT03,SA01\ - -i pandas.arrays.NumpyExtensionArray SA01\ - -i pandas.arrays.SparseArray PR07,SA01\ - -i pandas.arrays.TimedeltaArray PR07,SA01\ - -i pandas.bdate_range RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.__iter__ RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.agg RT03\ - -i pandas.core.groupby.DataFrameGroupBy.aggregate RT03\ - -i pandas.core.groupby.DataFrameGroupBy.apply RT03\ - -i pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.cummax RT03\ - -i pandas.core.groupby.DataFrameGroupBy.cummin RT03\ - -i pandas.core.groupby.DataFrameGroupBy.cumprod RT03\ - -i pandas.core.groupby.DataFrameGroupBy.cumsum RT03\ - -i pandas.core.groupby.DataFrameGroupBy.filter RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.groups SA01\ - -i pandas.core.groupby.DataFrameGroupBy.hist RT03\ - -i pandas.core.groupby.DataFrameGroupBy.indices SA01\ - -i pandas.core.groupby.DataFrameGroupBy.max SA01\ - -i pandas.core.groupby.DataFrameGroupBy.mean RT03\ - -i pandas.core.groupby.DataFrameGroupBy.median SA01\ - -i pandas.core.groupby.DataFrameGroupBy.min SA01\ - -i pandas.core.groupby.DataFrameGroupBy.nth PR02\ - -i pandas.core.groupby.DataFrameGroupBy.nunique RT03,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.ohlc SA01\ - -i pandas.core.groupby.DataFrameGroupBy.plot PR02,SA01\ - -i pandas.core.groupby.DataFrameGroupBy.prod SA01\ - -i pandas.core.groupby.DataFrameGroupBy.rank RT03\ - -i 
pandas.core.groupby.DataFrameGroupBy.resample RT03\ - -i pandas.core.groupby.DataFrameGroupBy.sem SA01\ - -i pandas.core.groupby.DataFrameGroupBy.skew RT03\ - -i pandas.core.groupby.DataFrameGroupBy.sum SA01\ - -i pandas.core.groupby.DataFrameGroupBy.transform RT03\ - -i pandas.core.groupby.SeriesGroupBy.__iter__ RT03,SA01\ - -i pandas.core.groupby.SeriesGroupBy.agg RT03\ - -i pandas.core.groupby.SeriesGroupBy.aggregate RT03\ - -i pandas.core.groupby.SeriesGroupBy.apply RT03\ - -i pandas.core.groupby.SeriesGroupBy.cummax RT03\ - -i pandas.core.groupby.SeriesGroupBy.cummin RT03\ - -i pandas.core.groupby.SeriesGroupBy.cumprod RT03\ - -i pandas.core.groupby.SeriesGroupBy.cumsum RT03\ - -i pandas.core.groupby.SeriesGroupBy.filter PR01,RT03,SA01\ - -i pandas.core.groupby.SeriesGroupBy.get_group RT03,SA01\ - -i pandas.core.groupby.SeriesGroupBy.groups SA01\ - -i pandas.core.groupby.SeriesGroupBy.indices SA01\ - -i pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing SA01\ - -i pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing SA01\ - -i pandas.core.groupby.SeriesGroupBy.max SA01\ - -i pandas.core.groupby.SeriesGroupBy.mean RT03\ - -i pandas.core.groupby.SeriesGroupBy.median SA01\ - -i pandas.core.groupby.SeriesGroupBy.min SA01\ - -i pandas.core.groupby.SeriesGroupBy.nth PR02\ - -i pandas.core.groupby.SeriesGroupBy.ohlc SA01\ - -i pandas.core.groupby.SeriesGroupBy.plot PR02,SA01\ - -i pandas.core.groupby.SeriesGroupBy.prod SA01\ - -i pandas.core.groupby.SeriesGroupBy.rank RT03\ - -i pandas.core.groupby.SeriesGroupBy.resample RT03\ - -i pandas.core.groupby.SeriesGroupBy.sem SA01\ - -i pandas.core.groupby.SeriesGroupBy.skew RT03\ - -i pandas.core.groupby.SeriesGroupBy.sum SA01\ - -i pandas.core.groupby.SeriesGroupBy.transform RT03\ - -i pandas.core.resample.Resampler.__iter__ RT03,SA01\ - -i pandas.core.resample.Resampler.ffill RT03\ - -i pandas.core.resample.Resampler.get_group RT03,SA01\ - -i pandas.core.resample.Resampler.groups SA01\ - -i 
pandas.core.resample.Resampler.indices SA01\ - -i pandas.core.resample.Resampler.max PR01,RT03,SA01\ - -i pandas.core.resample.Resampler.mean SA01\ - -i pandas.core.resample.Resampler.median SA01\ - -i pandas.core.resample.Resampler.min PR01,RT03,SA01\ - -i pandas.core.resample.Resampler.ohlc SA01\ - -i pandas.core.resample.Resampler.prod SA01\ - -i pandas.core.resample.Resampler.quantile PR01,PR07\ - -i pandas.core.resample.Resampler.sem SA01\ - -i pandas.core.resample.Resampler.std SA01\ - -i pandas.core.resample.Resampler.sum SA01\ - -i pandas.core.resample.Resampler.transform PR01,RT03,SA01\ - -i pandas.core.resample.Resampler.var SA01\ - -i pandas.core.window.expanding.Expanding.corr PR01\ - -i pandas.core.window.expanding.Expanding.count PR01\ - -i pandas.core.window.rolling.Rolling.max PR01\ - -i pandas.core.window.rolling.Window.std PR01\ - -i pandas.core.window.rolling.Window.var PR01\ - -i pandas.date_range RT03\ - -i pandas.describe_option SA01\ - -i pandas.errors.AbstractMethodError PR01,SA01\ - -i pandas.errors.AttributeConflictWarning SA01\ - -i pandas.errors.CSSWarning SA01\ - -i pandas.errors.CategoricalConversionWarning SA01\ - -i pandas.errors.ChainedAssignmentError SA01\ - -i pandas.errors.ClosedFileError SA01\ - -i pandas.errors.DataError SA01\ - -i pandas.errors.DuplicateLabelError SA01\ - -i pandas.errors.EmptyDataError SA01\ - -i pandas.errors.IntCastingNaNError SA01\ - -i pandas.errors.InvalidIndexError SA01\ - -i pandas.errors.InvalidVersion SA01\ - -i pandas.errors.MergeError SA01\ - -i pandas.errors.NullFrequencyError SA01\ - -i pandas.errors.NumExprClobberingError SA01\ - -i pandas.errors.NumbaUtilError SA01\ - -i pandas.errors.OptionError SA01\ - -i pandas.errors.OutOfBoundsDatetime SA01\ - -i pandas.errors.OutOfBoundsTimedelta SA01\ - -i pandas.errors.PerformanceWarning SA01\ - -i pandas.errors.PossibleDataLossError SA01\ - -i pandas.errors.PossiblePrecisionLoss SA01\ - -i pandas.errors.SpecificationError SA01\ - -i 
pandas.errors.UndefinedVariableError PR01,SA01\ - -i pandas.errors.UnsortedIndexError SA01\ - -i pandas.errors.UnsupportedFunctionCall SA01\ - -i pandas.errors.ValueLabelTypeMismatch SA01\ - -i pandas.get_option SA01\ - -i pandas.infer_freq SA01\ - -i pandas.interval_range RT03\ - -i pandas.io.formats.style.Styler.apply RT03\ - -i pandas.io.formats.style.Styler.apply_index RT03\ - -i pandas.io.formats.style.Styler.background_gradient RT03\ - -i pandas.io.formats.style.Styler.bar RT03,SA01\ - -i pandas.io.formats.style.Styler.clear SA01\ - -i pandas.io.formats.style.Styler.concat RT03,SA01\ - -i pandas.io.formats.style.Styler.export RT03\ - -i pandas.io.formats.style.Styler.format RT03\ - -i pandas.io.formats.style.Styler.format_index RT03\ - -i pandas.io.formats.style.Styler.from_custom_template SA01\ - -i pandas.io.formats.style.Styler.hide RT03,SA01\ - -i pandas.io.formats.style.Styler.highlight_between RT03\ - -i pandas.io.formats.style.Styler.highlight_max RT03\ - -i pandas.io.formats.style.Styler.highlight_min RT03\ - -i pandas.io.formats.style.Styler.highlight_null RT03\ - -i pandas.io.formats.style.Styler.highlight_quantile RT03\ - -i pandas.io.formats.style.Styler.map RT03\ - -i pandas.io.formats.style.Styler.map_index RT03\ - -i pandas.io.formats.style.Styler.relabel_index RT03\ - -i pandas.io.formats.style.Styler.set_caption RT03,SA01\ - -i pandas.io.formats.style.Styler.set_properties RT03,SA01\ - -i pandas.io.formats.style.Styler.set_sticky RT03,SA01\ - -i pandas.io.formats.style.Styler.set_table_attributes PR07,RT03\ - -i pandas.io.formats.style.Styler.set_table_styles RT03\ - -i pandas.io.formats.style.Styler.set_td_classes RT03\ - -i pandas.io.formats.style.Styler.set_tooltips RT03,SA01\ - -i pandas.io.formats.style.Styler.set_uuid PR07,RT03,SA01\ - -i pandas.io.formats.style.Styler.text_gradient RT03\ - -i pandas.io.formats.style.Styler.to_excel PR01\ - -i pandas.io.formats.style.Styler.to_string SA01\ - -i pandas.io.formats.style.Styler.use RT03\ - 
-i pandas.io.json.build_table_schema PR07,RT03,SA01\ - -i pandas.io.stata.StataReader.data_label SA01\ - -i pandas.io.stata.StataReader.value_labels RT03,SA01\ - -i pandas.io.stata.StataReader.variable_labels RT03,SA01\ - -i pandas.io.stata.StataWriter.write_file SA01\ - -i pandas.json_normalize RT03,SA01\ - -i pandas.merge PR07\ - -i pandas.merge_asof PR07,RT03\ - -i pandas.merge_ordered PR07\ - -i pandas.option_context SA01\ - -i pandas.period_range RT03,SA01\ - -i pandas.pivot PR07\ - -i pandas.pivot_table PR07\ - -i pandas.plotting.andrews_curves RT03,SA01\ - -i pandas.plotting.autocorrelation_plot RT03,SA01\ - -i pandas.plotting.lag_plot RT03,SA01\ - -i pandas.plotting.parallel_coordinates PR07,RT03,SA01\ - -i pandas.plotting.plot_params SA01\ - -i pandas.plotting.scatter_matrix PR07,SA01\ - -i pandas.plotting.table PR07,RT03,SA01\ - -i pandas.qcut PR07,SA01\ - -i pandas.read_feather SA01\ - -i pandas.read_orc SA01\ - -i pandas.read_sas SA01\ - -i pandas.read_spss SA01\ - -i pandas.reset_option SA01\ - -i pandas.set_eng_float_format RT03,SA01\ - -i pandas.set_option SA01\ - -i pandas.show_versions SA01\ - -i pandas.test SA01\ - -i pandas.testing.assert_extension_array_equal SA01\ - -i pandas.testing.assert_index_equal PR07,SA01\ - -i pandas.testing.assert_series_equal PR07,SA01\ - -i pandas.timedelta_range SA01\ - -i pandas.tseries.api.guess_datetime_format SA01\ - -i pandas.tseries.offsets.BDay PR02,SA01\ - -i pandas.tseries.offsets.BMonthBegin PR02\ - -i pandas.tseries.offsets.BMonthEnd PR02\ - -i pandas.tseries.offsets.BQuarterBegin PR02\ - -i pandas.tseries.offsets.BQuarterBegin.copy SA01\ - -i pandas.tseries.offsets.BQuarterBegin.freqstr SA01\ - -i pandas.tseries.offsets.BQuarterBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.BQuarterBegin.kwds SA01\ - -i pandas.tseries.offsets.BQuarterBegin.n GL08\ - -i pandas.tseries.offsets.BQuarterBegin.name SA01\ - -i pandas.tseries.offsets.BQuarterBegin.nanos GL08\ - -i 
pandas.tseries.offsets.BQuarterBegin.normalize GL08\ - -i pandas.tseries.offsets.BQuarterBegin.rule_code GL08\ - -i pandas.tseries.offsets.BQuarterBegin.startingMonth GL08\ - -i pandas.tseries.offsets.BQuarterEnd PR02\ - -i pandas.tseries.offsets.BQuarterEnd.copy SA01\ - -i pandas.tseries.offsets.BQuarterEnd.freqstr SA01\ - -i pandas.tseries.offsets.BQuarterEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.BQuarterEnd.kwds SA01\ - -i pandas.tseries.offsets.BQuarterEnd.n GL08\ - -i pandas.tseries.offsets.BQuarterEnd.name SA01\ - -i pandas.tseries.offsets.BQuarterEnd.nanos GL08\ - -i pandas.tseries.offsets.BQuarterEnd.normalize GL08\ - -i pandas.tseries.offsets.BQuarterEnd.rule_code GL08\ - -i pandas.tseries.offsets.BQuarterEnd.startingMonth GL08\ - -i pandas.tseries.offsets.BYearBegin PR02\ - -i pandas.tseries.offsets.BYearBegin.copy SA01\ - -i pandas.tseries.offsets.BYearBegin.freqstr SA01\ - -i pandas.tseries.offsets.BYearBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.BYearBegin.kwds SA01\ - -i pandas.tseries.offsets.BYearBegin.month GL08\ - -i pandas.tseries.offsets.BYearBegin.n GL08\ - -i pandas.tseries.offsets.BYearBegin.name SA01\ - -i pandas.tseries.offsets.BYearBegin.nanos GL08\ - -i pandas.tseries.offsets.BYearBegin.normalize GL08\ - -i pandas.tseries.offsets.BYearBegin.rule_code GL08\ - -i pandas.tseries.offsets.BYearEnd PR02\ - -i pandas.tseries.offsets.BYearEnd.copy SA01\ - -i pandas.tseries.offsets.BYearEnd.freqstr SA01\ - -i pandas.tseries.offsets.BYearEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.BYearEnd.kwds SA01\ - -i pandas.tseries.offsets.BYearEnd.month GL08\ - -i pandas.tseries.offsets.BYearEnd.n GL08\ - -i pandas.tseries.offsets.BYearEnd.name SA01\ - -i pandas.tseries.offsets.BYearEnd.nanos GL08\ - -i pandas.tseries.offsets.BYearEnd.normalize GL08\ - -i pandas.tseries.offsets.BYearEnd.rule_code GL08\ - -i pandas.tseries.offsets.BusinessDay PR02,SA01\ - -i pandas.tseries.offsets.BusinessDay.calendar GL08\ - -i 
pandas.tseries.offsets.BusinessDay.copy SA01\ - -i pandas.tseries.offsets.BusinessDay.freqstr SA01\ - -i pandas.tseries.offsets.BusinessDay.holidays GL08\ - -i pandas.tseries.offsets.BusinessDay.is_on_offset GL08\ - -i pandas.tseries.offsets.BusinessDay.kwds SA01\ - -i pandas.tseries.offsets.BusinessDay.n GL08\ - -i pandas.tseries.offsets.BusinessDay.name SA01\ - -i pandas.tseries.offsets.BusinessDay.nanos GL08\ - -i pandas.tseries.offsets.BusinessDay.normalize GL08\ - -i pandas.tseries.offsets.BusinessDay.rule_code GL08\ - -i pandas.tseries.offsets.BusinessDay.weekmask GL08\ - -i pandas.tseries.offsets.BusinessHour PR02,SA01\ - -i pandas.tseries.offsets.BusinessHour.calendar GL08\ - -i pandas.tseries.offsets.BusinessHour.copy SA01\ - -i pandas.tseries.offsets.BusinessHour.end GL08\ - -i pandas.tseries.offsets.BusinessHour.freqstr SA01\ - -i pandas.tseries.offsets.BusinessHour.holidays GL08\ - -i pandas.tseries.offsets.BusinessHour.is_on_offset GL08\ - -i pandas.tseries.offsets.BusinessHour.kwds SA01\ - -i pandas.tseries.offsets.BusinessHour.n GL08\ - -i pandas.tseries.offsets.BusinessHour.name SA01\ - -i pandas.tseries.offsets.BusinessHour.nanos GL08\ - -i pandas.tseries.offsets.BusinessHour.normalize GL08\ - -i pandas.tseries.offsets.BusinessHour.rule_code GL08\ - -i pandas.tseries.offsets.BusinessHour.start GL08\ - -i pandas.tseries.offsets.BusinessHour.weekmask GL08\ - -i pandas.tseries.offsets.BusinessMonthBegin PR02\ - -i pandas.tseries.offsets.BusinessMonthBegin.copy SA01\ - -i pandas.tseries.offsets.BusinessMonthBegin.freqstr SA01\ - -i pandas.tseries.offsets.BusinessMonthBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.BusinessMonthBegin.kwds SA01\ - -i pandas.tseries.offsets.BusinessMonthBegin.n GL08\ - -i pandas.tseries.offsets.BusinessMonthBegin.name SA01\ - -i pandas.tseries.offsets.BusinessMonthBegin.nanos GL08\ - -i pandas.tseries.offsets.BusinessMonthBegin.normalize GL08\ - -i pandas.tseries.offsets.BusinessMonthBegin.rule_code GL08\ - -i 
pandas.tseries.offsets.BusinessMonthEnd PR02\ - -i pandas.tseries.offsets.BusinessMonthEnd.copy SA01\ - -i pandas.tseries.offsets.BusinessMonthEnd.freqstr SA01\ - -i pandas.tseries.offsets.BusinessMonthEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.BusinessMonthEnd.kwds SA01\ - -i pandas.tseries.offsets.BusinessMonthEnd.n GL08\ - -i pandas.tseries.offsets.BusinessMonthEnd.name SA01\ - -i pandas.tseries.offsets.BusinessMonthEnd.nanos GL08\ - -i pandas.tseries.offsets.BusinessMonthEnd.normalize GL08\ - -i pandas.tseries.offsets.BusinessMonthEnd.rule_code GL08\ - -i pandas.tseries.offsets.CBMonthBegin PR02\ - -i pandas.tseries.offsets.CBMonthEnd PR02\ - -i pandas.tseries.offsets.CDay PR02,SA01\ - -i pandas.tseries.offsets.CustomBusinessDay PR02,SA01\ - -i pandas.tseries.offsets.CustomBusinessDay.calendar GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.copy SA01\ - -i pandas.tseries.offsets.CustomBusinessDay.freqstr SA01\ - -i pandas.tseries.offsets.CustomBusinessDay.holidays GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.is_on_offset GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.kwds SA01\ - -i pandas.tseries.offsets.CustomBusinessDay.n GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.name SA01\ - -i pandas.tseries.offsets.CustomBusinessDay.nanos GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.normalize GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.rule_code GL08\ - -i pandas.tseries.offsets.CustomBusinessDay.weekmask GL08\ - -i pandas.tseries.offsets.CustomBusinessHour PR02,SA01\ - -i pandas.tseries.offsets.CustomBusinessHour.calendar GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.copy SA01\ - -i pandas.tseries.offsets.CustomBusinessHour.end GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.freqstr SA01\ - -i pandas.tseries.offsets.CustomBusinessHour.holidays GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.is_on_offset GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.kwds SA01\ - -i 
pandas.tseries.offsets.CustomBusinessHour.n GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.name SA01\ - -i pandas.tseries.offsets.CustomBusinessHour.nanos GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.normalize GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.rule_code GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.start GL08\ - -i pandas.tseries.offsets.CustomBusinessHour.weekmask GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin PR02\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.calendar GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.copy SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.freqstr SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.holidays GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.is_on_offset SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.kwds SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.m_offset GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.n GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.name SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.nanos GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.normalize GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.rule_code GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthBegin.weekmask GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd PR02\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.calendar GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.copy SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.freqstr SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.holidays GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.is_on_offset SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.kwds SA01\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.m_offset GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.n GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.name SA01\ - -i 
pandas.tseries.offsets.CustomBusinessMonthEnd.nanos GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.normalize GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.rule_code GL08\ - -i pandas.tseries.offsets.CustomBusinessMonthEnd.weekmask GL08\ - -i pandas.tseries.offsets.DateOffset PR02\ - -i pandas.tseries.offsets.DateOffset.copy SA01\ - -i pandas.tseries.offsets.DateOffset.freqstr SA01\ - -i pandas.tseries.offsets.DateOffset.is_on_offset GL08\ - -i pandas.tseries.offsets.DateOffset.kwds SA01\ - -i pandas.tseries.offsets.DateOffset.n GL08\ - -i pandas.tseries.offsets.DateOffset.name SA01\ - -i pandas.tseries.offsets.DateOffset.nanos GL08\ - -i pandas.tseries.offsets.DateOffset.normalize GL08\ - -i pandas.tseries.offsets.DateOffset.rule_code GL08\ - -i pandas.tseries.offsets.Day PR02\ - -i pandas.tseries.offsets.Day.copy SA01\ - -i pandas.tseries.offsets.Day.delta GL08\ - -i pandas.tseries.offsets.Day.freqstr SA01\ - -i pandas.tseries.offsets.Day.is_on_offset GL08\ - -i pandas.tseries.offsets.Day.kwds SA01\ - -i pandas.tseries.offsets.Day.n GL08\ - -i pandas.tseries.offsets.Day.name SA01\ - -i pandas.tseries.offsets.Day.nanos SA01\ - -i pandas.tseries.offsets.Day.normalize GL08\ - -i pandas.tseries.offsets.Day.rule_code GL08\ - -i pandas.tseries.offsets.Easter PR02\ - -i pandas.tseries.offsets.Easter.copy SA01\ - -i pandas.tseries.offsets.Easter.freqstr SA01\ - -i pandas.tseries.offsets.Easter.is_on_offset GL08\ - -i pandas.tseries.offsets.Easter.kwds SA01\ - -i pandas.tseries.offsets.Easter.n GL08\ - -i pandas.tseries.offsets.Easter.name SA01\ - -i pandas.tseries.offsets.Easter.nanos GL08\ - -i pandas.tseries.offsets.Easter.normalize GL08\ - -i pandas.tseries.offsets.Easter.rule_code GL08\ - -i pandas.tseries.offsets.FY5253 PR02\ - -i pandas.tseries.offsets.FY5253.copy SA01\ - -i pandas.tseries.offsets.FY5253.freqstr SA01\ - -i pandas.tseries.offsets.FY5253.get_rule_code_suffix GL08\ - -i pandas.tseries.offsets.FY5253.get_year_end GL08\ - -i 
pandas.tseries.offsets.FY5253.is_on_offset GL08\ - -i pandas.tseries.offsets.FY5253.kwds SA01\ - -i pandas.tseries.offsets.FY5253.n GL08\ - -i pandas.tseries.offsets.FY5253.name SA01\ - -i pandas.tseries.offsets.FY5253.nanos GL08\ - -i pandas.tseries.offsets.FY5253.normalize GL08\ - -i pandas.tseries.offsets.FY5253.rule_code GL08\ - -i pandas.tseries.offsets.FY5253.startingMonth GL08\ - -i pandas.tseries.offsets.FY5253.variation GL08\ - -i pandas.tseries.offsets.FY5253.weekday GL08\ - -i pandas.tseries.offsets.FY5253Quarter PR02\ - -i pandas.tseries.offsets.FY5253Quarter.copy SA01\ - -i pandas.tseries.offsets.FY5253Quarter.freqstr SA01\ - -i pandas.tseries.offsets.FY5253Quarter.get_rule_code_suffix GL08\ - -i pandas.tseries.offsets.FY5253Quarter.get_weeks GL08\ - -i pandas.tseries.offsets.FY5253Quarter.is_on_offset GL08\ - -i pandas.tseries.offsets.FY5253Quarter.kwds SA01\ - -i pandas.tseries.offsets.FY5253Quarter.n GL08\ - -i pandas.tseries.offsets.FY5253Quarter.name SA01\ - -i pandas.tseries.offsets.FY5253Quarter.nanos GL08\ - -i pandas.tseries.offsets.FY5253Quarter.normalize GL08\ - -i pandas.tseries.offsets.FY5253Quarter.qtr_with_extra_week GL08\ - -i pandas.tseries.offsets.FY5253Quarter.rule_code GL08\ - -i pandas.tseries.offsets.FY5253Quarter.startingMonth GL08\ - -i pandas.tseries.offsets.FY5253Quarter.variation GL08\ - -i pandas.tseries.offsets.FY5253Quarter.weekday GL08\ - -i pandas.tseries.offsets.FY5253Quarter.year_has_extra_week GL08\ - -i pandas.tseries.offsets.Hour PR02\ - -i pandas.tseries.offsets.Hour.copy SA01\ - -i pandas.tseries.offsets.Hour.delta GL08\ - -i pandas.tseries.offsets.Hour.freqstr SA01\ - -i pandas.tseries.offsets.Hour.is_on_offset GL08\ - -i pandas.tseries.offsets.Hour.kwds SA01\ - -i pandas.tseries.offsets.Hour.n GL08\ - -i pandas.tseries.offsets.Hour.name SA01\ - -i pandas.tseries.offsets.Hour.nanos SA01\ - -i pandas.tseries.offsets.Hour.normalize GL08\ - -i pandas.tseries.offsets.Hour.rule_code GL08\ - -i 
pandas.tseries.offsets.LastWeekOfMonth PR02,SA01\ - -i pandas.tseries.offsets.LastWeekOfMonth.copy SA01\ - -i pandas.tseries.offsets.LastWeekOfMonth.freqstr SA01\ - -i pandas.tseries.offsets.LastWeekOfMonth.is_on_offset GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth.kwds SA01\ - -i pandas.tseries.offsets.LastWeekOfMonth.n GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth.name SA01\ - -i pandas.tseries.offsets.LastWeekOfMonth.nanos GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth.normalize GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth.rule_code GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth.week GL08\ - -i pandas.tseries.offsets.LastWeekOfMonth.weekday GL08\ - -i pandas.tseries.offsets.Micro PR02\ - -i pandas.tseries.offsets.Micro.copy SA01\ - -i pandas.tseries.offsets.Micro.delta GL08\ - -i pandas.tseries.offsets.Micro.freqstr SA01\ - -i pandas.tseries.offsets.Micro.is_on_offset GL08\ - -i pandas.tseries.offsets.Micro.kwds SA01\ - -i pandas.tseries.offsets.Micro.n GL08\ - -i pandas.tseries.offsets.Micro.name SA01\ - -i pandas.tseries.offsets.Micro.nanos SA01\ - -i pandas.tseries.offsets.Micro.normalize GL08\ - -i pandas.tseries.offsets.Micro.rule_code GL08\ - -i pandas.tseries.offsets.Milli PR02\ - -i pandas.tseries.offsets.Milli.copy SA01\ - -i pandas.tseries.offsets.Milli.delta GL08\ - -i pandas.tseries.offsets.Milli.freqstr SA01\ - -i pandas.tseries.offsets.Milli.is_on_offset GL08\ - -i pandas.tseries.offsets.Milli.kwds SA01\ - -i pandas.tseries.offsets.Milli.n GL08\ - -i pandas.tseries.offsets.Milli.name SA01\ - -i pandas.tseries.offsets.Milli.nanos SA01\ - -i pandas.tseries.offsets.Milli.normalize GL08\ - -i pandas.tseries.offsets.Milli.rule_code GL08\ - -i pandas.tseries.offsets.Minute PR02\ - -i pandas.tseries.offsets.Minute.copy SA01\ - -i pandas.tseries.offsets.Minute.delta GL08\ - -i pandas.tseries.offsets.Minute.freqstr SA01\ - -i pandas.tseries.offsets.Minute.is_on_offset GL08\ - -i pandas.tseries.offsets.Minute.kwds SA01\ - -i 
pandas.tseries.offsets.Minute.n GL08\ - -i pandas.tseries.offsets.Minute.name SA01\ - -i pandas.tseries.offsets.Minute.nanos SA01\ - -i pandas.tseries.offsets.Minute.normalize GL08\ - -i pandas.tseries.offsets.Minute.rule_code GL08\ - -i pandas.tseries.offsets.MonthBegin PR02\ - -i pandas.tseries.offsets.MonthBegin.copy SA01\ - -i pandas.tseries.offsets.MonthBegin.freqstr SA01\ - -i pandas.tseries.offsets.MonthBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.MonthBegin.kwds SA01\ - -i pandas.tseries.offsets.MonthBegin.n GL08\ - -i pandas.tseries.offsets.MonthBegin.name SA01\ - -i pandas.tseries.offsets.MonthBegin.nanos GL08\ - -i pandas.tseries.offsets.MonthBegin.normalize GL08\ - -i pandas.tseries.offsets.MonthBegin.rule_code GL08\ - -i pandas.tseries.offsets.MonthEnd PR02\ - -i pandas.tseries.offsets.MonthEnd.copy SA01\ - -i pandas.tseries.offsets.MonthEnd.freqstr SA01\ - -i pandas.tseries.offsets.MonthEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.MonthEnd.kwds SA01\ - -i pandas.tseries.offsets.MonthEnd.n GL08\ - -i pandas.tseries.offsets.MonthEnd.name SA01\ - -i pandas.tseries.offsets.MonthEnd.nanos GL08\ - -i pandas.tseries.offsets.MonthEnd.normalize GL08\ - -i pandas.tseries.offsets.MonthEnd.rule_code GL08\ - -i pandas.tseries.offsets.Nano PR02\ - -i pandas.tseries.offsets.Nano.copy SA01\ - -i pandas.tseries.offsets.Nano.delta GL08\ - -i pandas.tseries.offsets.Nano.freqstr SA01\ - -i pandas.tseries.offsets.Nano.is_on_offset GL08\ - -i pandas.tseries.offsets.Nano.kwds SA01\ - -i pandas.tseries.offsets.Nano.n GL08\ - -i pandas.tseries.offsets.Nano.name SA01\ - -i pandas.tseries.offsets.Nano.nanos SA01\ - -i pandas.tseries.offsets.Nano.normalize GL08\ - -i pandas.tseries.offsets.Nano.rule_code GL08\ - -i pandas.tseries.offsets.QuarterBegin PR02\ - -i pandas.tseries.offsets.QuarterBegin.copy SA01\ - -i pandas.tseries.offsets.QuarterBegin.freqstr SA01\ - -i pandas.tseries.offsets.QuarterBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.QuarterBegin.kwds 
SA01\ - -i pandas.tseries.offsets.QuarterBegin.n GL08\ - -i pandas.tseries.offsets.QuarterBegin.name SA01\ - -i pandas.tseries.offsets.QuarterBegin.nanos GL08\ - -i pandas.tseries.offsets.QuarterBegin.normalize GL08\ - -i pandas.tseries.offsets.QuarterBegin.rule_code GL08\ - -i pandas.tseries.offsets.QuarterBegin.startingMonth GL08\ - -i pandas.tseries.offsets.QuarterEnd PR02\ - -i pandas.tseries.offsets.QuarterEnd.copy SA01\ - -i pandas.tseries.offsets.QuarterEnd.freqstr SA01\ - -i pandas.tseries.offsets.QuarterEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.QuarterEnd.kwds SA01\ - -i pandas.tseries.offsets.QuarterEnd.n GL08\ - -i pandas.tseries.offsets.QuarterEnd.name SA01\ - -i pandas.tseries.offsets.QuarterEnd.nanos GL08\ - -i pandas.tseries.offsets.QuarterEnd.normalize GL08\ - -i pandas.tseries.offsets.QuarterEnd.rule_code GL08\ - -i pandas.tseries.offsets.QuarterEnd.startingMonth GL08\ - -i pandas.tseries.offsets.Second PR02\ - -i pandas.tseries.offsets.Second.copy SA01\ - -i pandas.tseries.offsets.Second.delta GL08\ - -i pandas.tseries.offsets.Second.freqstr SA01\ - -i pandas.tseries.offsets.Second.is_on_offset GL08\ - -i pandas.tseries.offsets.Second.kwds SA01\ - -i pandas.tseries.offsets.Second.n GL08\ - -i pandas.tseries.offsets.Second.name SA01\ - -i pandas.tseries.offsets.Second.nanos SA01\ - -i pandas.tseries.offsets.Second.normalize GL08\ - -i pandas.tseries.offsets.Second.rule_code GL08\ - -i pandas.tseries.offsets.SemiMonthBegin PR02,SA01\ - -i pandas.tseries.offsets.SemiMonthBegin.copy SA01\ - -i pandas.tseries.offsets.SemiMonthBegin.day_of_month GL08\ - -i pandas.tseries.offsets.SemiMonthBegin.freqstr SA01\ - -i pandas.tseries.offsets.SemiMonthBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.SemiMonthBegin.kwds SA01\ - -i pandas.tseries.offsets.SemiMonthBegin.n GL08\ - -i pandas.tseries.offsets.SemiMonthBegin.name SA01\ - -i pandas.tseries.offsets.SemiMonthBegin.nanos GL08\ - -i pandas.tseries.offsets.SemiMonthBegin.normalize GL08\ - -i 
pandas.tseries.offsets.SemiMonthBegin.rule_code GL08\ - -i pandas.tseries.offsets.SemiMonthEnd PR02,SA01\ - -i pandas.tseries.offsets.SemiMonthEnd.copy SA01\ - -i pandas.tseries.offsets.SemiMonthEnd.day_of_month GL08\ - -i pandas.tseries.offsets.SemiMonthEnd.freqstr SA01\ - -i pandas.tseries.offsets.SemiMonthEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.SemiMonthEnd.kwds SA01\ - -i pandas.tseries.offsets.SemiMonthEnd.n GL08\ - -i pandas.tseries.offsets.SemiMonthEnd.name SA01\ - -i pandas.tseries.offsets.SemiMonthEnd.nanos GL08\ - -i pandas.tseries.offsets.SemiMonthEnd.normalize GL08\ - -i pandas.tseries.offsets.SemiMonthEnd.rule_code GL08\ - -i pandas.tseries.offsets.Tick GL08\ - -i pandas.tseries.offsets.Tick.copy SA01\ - -i pandas.tseries.offsets.Tick.delta GL08\ - -i pandas.tseries.offsets.Tick.freqstr SA01\ - -i pandas.tseries.offsets.Tick.is_on_offset GL08\ - -i pandas.tseries.offsets.Tick.kwds SA01\ - -i pandas.tseries.offsets.Tick.n GL08\ - -i pandas.tseries.offsets.Tick.name SA01\ - -i pandas.tseries.offsets.Tick.nanos SA01\ - -i pandas.tseries.offsets.Tick.normalize GL08\ - -i pandas.tseries.offsets.Tick.rule_code GL08\ - -i pandas.tseries.offsets.Week PR02\ - -i pandas.tseries.offsets.Week.copy SA01\ - -i pandas.tseries.offsets.Week.freqstr SA01\ - -i pandas.tseries.offsets.Week.is_on_offset GL08\ - -i pandas.tseries.offsets.Week.kwds SA01\ - -i pandas.tseries.offsets.Week.n GL08\ - -i pandas.tseries.offsets.Week.name SA01\ - -i pandas.tseries.offsets.Week.nanos GL08\ - -i pandas.tseries.offsets.Week.normalize GL08\ - -i pandas.tseries.offsets.Week.rule_code GL08\ - -i pandas.tseries.offsets.Week.weekday GL08\ - -i pandas.tseries.offsets.WeekOfMonth PR02,SA01\ - -i pandas.tseries.offsets.WeekOfMonth.copy SA01\ - -i pandas.tseries.offsets.WeekOfMonth.freqstr SA01\ - -i pandas.tseries.offsets.WeekOfMonth.is_on_offset GL08\ - -i pandas.tseries.offsets.WeekOfMonth.kwds SA01\ - -i pandas.tseries.offsets.WeekOfMonth.n GL08\ - -i 
pandas.tseries.offsets.WeekOfMonth.name SA01\ - -i pandas.tseries.offsets.WeekOfMonth.nanos GL08\ - -i pandas.tseries.offsets.WeekOfMonth.normalize GL08\ - -i pandas.tseries.offsets.WeekOfMonth.rule_code GL08\ - -i pandas.tseries.offsets.WeekOfMonth.week GL08\ - -i pandas.tseries.offsets.WeekOfMonth.weekday GL08\ - -i pandas.tseries.offsets.YearBegin PR02\ - -i pandas.tseries.offsets.YearBegin.copy SA01\ - -i pandas.tseries.offsets.YearBegin.freqstr SA01\ - -i pandas.tseries.offsets.YearBegin.is_on_offset GL08\ - -i pandas.tseries.offsets.YearBegin.kwds SA01\ - -i pandas.tseries.offsets.YearBegin.month GL08\ - -i pandas.tseries.offsets.YearBegin.n GL08\ - -i pandas.tseries.offsets.YearBegin.name SA01\ - -i pandas.tseries.offsets.YearBegin.nanos GL08\ - -i pandas.tseries.offsets.YearBegin.normalize GL08\ - -i pandas.tseries.offsets.YearBegin.rule_code GL08\ - -i pandas.tseries.offsets.YearEnd PR02\ - -i pandas.tseries.offsets.YearEnd.copy SA01\ - -i pandas.tseries.offsets.YearEnd.freqstr SA01\ - -i pandas.tseries.offsets.YearEnd.is_on_offset GL08\ - -i pandas.tseries.offsets.YearEnd.kwds SA01\ - -i pandas.tseries.offsets.YearEnd.month GL08\ - -i pandas.tseries.offsets.YearEnd.n GL08\ - -i pandas.tseries.offsets.YearEnd.name SA01\ - -i pandas.tseries.offsets.YearEnd.nanos GL08\ - -i pandas.tseries.offsets.YearEnd.normalize GL08\ - -i pandas.tseries.offsets.YearEnd.rule_code GL08\ - -i pandas.unique PR07\ - -i pandas.util.hash_array PR07,SA01\ - -i pandas.util.hash_pandas_object PR07,SA01 # There should be no backslash in the final line, please keep this comment in the last ignored function + -i ES01 `# For now it is ok if docstrings are missing the extended summary` \ + -i "pandas.Series.dt PR01" `# Accessors are implemented as classes, but we do not document the Parameters section` \ + -i "pandas.Categorical.__array__ SA01" \ + -i "pandas.Categorical.codes SA01" \ + -i "pandas.Categorical.dtype SA01" \ + -i "pandas.Categorical.from_codes SA01" \ + -i 
"pandas.Categorical.ordered SA01" \ + -i "pandas.CategoricalDtype.categories SA01" \ + -i "pandas.CategoricalDtype.ordered SA01" \ + -i "pandas.CategoricalIndex.codes SA01" \ + -i "pandas.CategoricalIndex.ordered SA01" \ + -i "pandas.DataFrame.__dataframe__ SA01" \ + -i "pandas.DataFrame.__iter__ SA01" \ + -i "pandas.DataFrame.assign SA01" \ + -i "pandas.DataFrame.at_time PR01" \ + -i "pandas.DataFrame.axes SA01" \ + -i "pandas.DataFrame.backfill PR01,SA01" \ + -i "pandas.DataFrame.bfill SA01" \ + -i "pandas.DataFrame.columns SA01" \ + -i "pandas.DataFrame.copy SA01" \ + -i "pandas.DataFrame.droplevel SA01" \ + -i "pandas.DataFrame.dtypes SA01" \ + -i "pandas.DataFrame.ffill SA01" \ + -i "pandas.DataFrame.first_valid_index SA01" \ + -i "pandas.DataFrame.get SA01" \ + -i "pandas.DataFrame.hist RT03" \ + -i "pandas.DataFrame.infer_objects RT03" \ + -i "pandas.DataFrame.keys SA01" \ + -i "pandas.DataFrame.kurt RT03,SA01" \ + -i "pandas.DataFrame.kurtosis RT03,SA01" \ + -i "pandas.DataFrame.last_valid_index SA01" \ + -i "pandas.DataFrame.mask RT03" \ + -i "pandas.DataFrame.max RT03" \ + -i "pandas.DataFrame.mean RT03,SA01" \ + -i "pandas.DataFrame.median RT03,SA01" \ + -i "pandas.DataFrame.min RT03" \ + -i "pandas.DataFrame.pad PR01,SA01" \ + -i "pandas.DataFrame.plot PR02,SA01" \ + -i "pandas.DataFrame.pop SA01" \ + -i "pandas.DataFrame.prod RT03" \ + -i "pandas.DataFrame.product RT03" \ + -i "pandas.DataFrame.reorder_levels SA01" \ + -i "pandas.DataFrame.sem PR01,RT03,SA01" \ + -i "pandas.DataFrame.skew RT03,SA01" \ + -i "pandas.DataFrame.sparse PR01,SA01" \ + -i "pandas.DataFrame.sparse.density SA01" \ + -i "pandas.DataFrame.sparse.from_spmatrix SA01" \ + -i "pandas.DataFrame.sparse.to_coo SA01" \ + -i "pandas.DataFrame.sparse.to_dense SA01" \ + -i "pandas.DataFrame.std PR01,RT03,SA01" \ + -i "pandas.DataFrame.sum RT03" \ + -i "pandas.DataFrame.swapaxes PR01,SA01" \ + -i "pandas.DataFrame.swaplevel SA01" \ + -i "pandas.DataFrame.to_feather SA01" \ + -i 
"pandas.DataFrame.to_markdown SA01" \ + -i "pandas.DataFrame.to_parquet RT03" \ + -i "pandas.DataFrame.to_period SA01" \ + -i "pandas.DataFrame.to_timestamp SA01" \ + -i "pandas.DataFrame.tz_convert SA01" \ + -i "pandas.DataFrame.tz_localize SA01" \ + -i "pandas.DataFrame.unstack RT03" \ + -i "pandas.DataFrame.value_counts RT03" \ + -i "pandas.DataFrame.var PR01,RT03,SA01" \ + -i "pandas.DataFrame.where RT03" \ + -i "pandas.DatetimeIndex.ceil SA01" \ + -i "pandas.DatetimeIndex.date SA01" \ + -i "pandas.DatetimeIndex.day SA01" \ + -i "pandas.DatetimeIndex.day_name SA01" \ + -i "pandas.DatetimeIndex.day_of_year SA01" \ + -i "pandas.DatetimeIndex.dayofyear SA01" \ + -i "pandas.DatetimeIndex.floor SA01" \ + -i "pandas.DatetimeIndex.freqstr SA01" \ + -i "pandas.DatetimeIndex.hour SA01" \ + -i "pandas.DatetimeIndex.indexer_at_time PR01,RT03" \ + -i "pandas.DatetimeIndex.indexer_between_time RT03" \ + -i "pandas.DatetimeIndex.inferred_freq SA01" \ + -i "pandas.DatetimeIndex.is_leap_year SA01" \ + -i "pandas.DatetimeIndex.microsecond SA01" \ + -i "pandas.DatetimeIndex.minute SA01" \ + -i "pandas.DatetimeIndex.month SA01" \ + -i "pandas.DatetimeIndex.month_name SA01" \ + -i "pandas.DatetimeIndex.nanosecond SA01" \ + -i "pandas.DatetimeIndex.quarter SA01" \ + -i "pandas.DatetimeIndex.round SA01" \ + -i "pandas.DatetimeIndex.second SA01" \ + -i "pandas.DatetimeIndex.snap PR01,RT03,SA01" \ + -i "pandas.DatetimeIndex.std PR01,RT03" \ + -i "pandas.DatetimeIndex.time SA01" \ + -i "pandas.DatetimeIndex.timetz SA01" \ + -i "pandas.DatetimeIndex.to_period RT03" \ + -i "pandas.DatetimeIndex.to_pydatetime RT03,SA01" \ + -i "pandas.DatetimeIndex.tz SA01" \ + -i "pandas.DatetimeIndex.tz_convert RT03" \ + -i "pandas.DatetimeIndex.year SA01" \ + -i "pandas.DatetimeTZDtype SA01" \ + -i "pandas.DatetimeTZDtype.tz SA01" \ + -i "pandas.DatetimeTZDtype.unit SA01" \ + -i "pandas.ExcelFile PR01,SA01" \ + -i "pandas.ExcelFile.parse PR01,SA01" \ + -i "pandas.ExcelWriter SA01" \ + -i 
"pandas.Float32Dtype SA01" \ + -i "pandas.Float64Dtype SA01" \ + -i "pandas.Grouper PR02,SA01" \ + -i "pandas.HDFStore.append PR01,SA01" \ + -i "pandas.HDFStore.get SA01" \ + -i "pandas.HDFStore.groups SA01" \ + -i "pandas.HDFStore.info RT03,SA01" \ + -i "pandas.HDFStore.keys SA01" \ + -i "pandas.HDFStore.put PR01,SA01" \ + -i "pandas.HDFStore.select SA01" \ + -i "pandas.HDFStore.walk SA01" \ + -i "pandas.Index PR07" \ + -i "pandas.Index.T SA01" \ + -i "pandas.Index.append PR07,RT03,SA01" \ + -i "pandas.Index.astype SA01" \ + -i "pandas.Index.copy PR07,SA01" \ + -i "pandas.Index.difference PR07,RT03,SA01" \ + -i "pandas.Index.drop PR07,SA01" \ + -i "pandas.Index.drop_duplicates RT03" \ + -i "pandas.Index.droplevel RT03,SA01" \ + -i "pandas.Index.dropna RT03,SA01" \ + -i "pandas.Index.dtype SA01" \ + -i "pandas.Index.duplicated RT03" \ + -i "pandas.Index.empty GL08" \ + -i "pandas.Index.equals SA01" \ + -i "pandas.Index.fillna RT03" \ + -i "pandas.Index.get_indexer PR07,SA01" \ + -i "pandas.Index.get_indexer_for PR01,SA01" \ + -i "pandas.Index.get_indexer_non_unique PR07,SA01" \ + -i "pandas.Index.get_loc PR07,RT03,SA01" \ + -i "pandas.Index.get_slice_bound PR07" \ + -i "pandas.Index.hasnans SA01" \ + -i "pandas.Index.identical PR01,SA01" \ + -i "pandas.Index.inferred_type SA01" \ + -i "pandas.Index.insert PR07,RT03,SA01" \ + -i "pandas.Index.intersection PR07,RT03,SA01" \ + -i "pandas.Index.item SA01" \ + -i "pandas.Index.join PR07,RT03,SA01" \ + -i "pandas.Index.map SA01" \ + -i "pandas.Index.memory_usage RT03" \ + -i "pandas.Index.name SA01" \ + -i "pandas.Index.names GL08" \ + -i "pandas.Index.nbytes SA01" \ + -i "pandas.Index.ndim SA01" \ + -i "pandas.Index.nunique RT03" \ + -i "pandas.Index.putmask PR01,RT03" \ + -i "pandas.Index.ravel PR01,RT03" \ + -i "pandas.Index.reindex PR07" \ + -i "pandas.Index.shape SA01" \ + -i "pandas.Index.size SA01" \ + -i "pandas.Index.slice_indexer PR07,RT03,SA01" \ + -i "pandas.Index.slice_locs RT03" \ + -i "pandas.Index.str 
PR01,SA01" \ + -i "pandas.Index.symmetric_difference PR07,RT03,SA01" \ + -i "pandas.Index.take PR01,PR07" \ + -i "pandas.Index.to_list RT03" \ + -i "pandas.Index.union PR07,RT03,SA01" \ + -i "pandas.Index.unique RT03" \ + -i "pandas.Index.value_counts RT03" \ + -i "pandas.Index.view GL08" \ + -i "pandas.Int16Dtype SA01" \ + -i "pandas.Int32Dtype SA01" \ + -i "pandas.Int64Dtype SA01" \ + -i "pandas.Int8Dtype SA01" \ + -i "pandas.Interval PR02" \ + -i "pandas.Interval.closed SA01" \ + -i "pandas.Interval.left SA01" \ + -i "pandas.Interval.mid SA01" \ + -i "pandas.Interval.right SA01" \ + -i "pandas.IntervalDtype PR01,SA01" \ + -i "pandas.IntervalDtype.subtype SA01" \ + -i "pandas.IntervalIndex.closed SA01" \ + -i "pandas.IntervalIndex.contains RT03" \ + -i "pandas.IntervalIndex.get_indexer PR07,SA01" \ + -i "pandas.IntervalIndex.get_loc PR07,RT03,SA01" \ + -i "pandas.IntervalIndex.is_non_overlapping_monotonic SA01" \ + -i "pandas.IntervalIndex.left GL08" \ + -i "pandas.IntervalIndex.length GL08" \ + -i "pandas.IntervalIndex.mid GL08" \ + -i "pandas.IntervalIndex.right GL08" \ + -i "pandas.IntervalIndex.set_closed RT03,SA01" \ + -i "pandas.IntervalIndex.to_tuples RT03,SA01" \ + -i "pandas.MultiIndex PR01" \ + -i "pandas.MultiIndex.append PR07,SA01" \ + -i "pandas.MultiIndex.copy PR07,RT03,SA01" \ + -i "pandas.MultiIndex.drop PR07,RT03,SA01" \ + -i "pandas.MultiIndex.droplevel RT03,SA01" \ + -i "pandas.MultiIndex.dtypes SA01" \ + -i "pandas.MultiIndex.get_indexer PR07,SA01" \ + -i "pandas.MultiIndex.get_level_values SA01" \ + -i "pandas.MultiIndex.get_loc PR07" \ + -i "pandas.MultiIndex.get_loc_level PR07" \ + -i "pandas.MultiIndex.levels SA01" \ + -i "pandas.MultiIndex.levshape SA01" \ + -i "pandas.MultiIndex.names SA01" \ + -i "pandas.MultiIndex.nlevels SA01" \ + -i "pandas.MultiIndex.remove_unused_levels RT03,SA01" \ + -i "pandas.MultiIndex.reorder_levels RT03,SA01" \ + -i "pandas.MultiIndex.set_codes SA01" \ + -i "pandas.MultiIndex.set_levels RT03,SA01" \ + -i 
"pandas.MultiIndex.sortlevel PR07,SA01" \ + -i "pandas.MultiIndex.to_frame RT03" \ + -i "pandas.MultiIndex.truncate SA01" \ + -i "pandas.NA SA01" \ + -i "pandas.NaT SA01" \ + -i "pandas.NamedAgg SA01" \ + -i "pandas.Period SA01" \ + -i "pandas.Period.asfreq SA01" \ + -i "pandas.Period.freq GL08" \ + -i "pandas.Period.freqstr SA01" \ + -i "pandas.Period.is_leap_year SA01" \ + -i "pandas.Period.month SA01" \ + -i "pandas.Period.now SA01" \ + -i "pandas.Period.ordinal GL08" \ + -i "pandas.Period.quarter SA01" \ + -i "pandas.Period.strftime PR01,SA01" \ + -i "pandas.Period.to_timestamp SA01" \ + -i "pandas.Period.year SA01" \ + -i "pandas.PeriodDtype SA01" \ + -i "pandas.PeriodDtype.freq SA01" \ + -i "pandas.PeriodIndex.day SA01" \ + -i "pandas.PeriodIndex.day_of_week SA01" \ + -i "pandas.PeriodIndex.day_of_year SA01" \ + -i "pandas.PeriodIndex.dayofweek SA01" \ + -i "pandas.PeriodIndex.dayofyear SA01" \ + -i "pandas.PeriodIndex.days_in_month SA01" \ + -i "pandas.PeriodIndex.daysinmonth SA01" \ + -i "pandas.PeriodIndex.freqstr SA01" \ + -i "pandas.PeriodIndex.from_fields PR07,SA01" \ + -i "pandas.PeriodIndex.from_ordinals SA01" \ + -i "pandas.PeriodIndex.hour SA01" \ + -i "pandas.PeriodIndex.is_leap_year SA01" \ + -i "pandas.PeriodIndex.minute SA01" \ + -i "pandas.PeriodIndex.month SA01" \ + -i "pandas.PeriodIndex.quarter SA01" \ + -i "pandas.PeriodIndex.qyear GL08" \ + -i "pandas.PeriodIndex.second SA01" \ + -i "pandas.PeriodIndex.to_timestamp RT03,SA01" \ + -i "pandas.PeriodIndex.week SA01" \ + -i "pandas.PeriodIndex.weekday SA01" \ + -i "pandas.PeriodIndex.weekofyear SA01" \ + -i "pandas.PeriodIndex.year SA01" \ + -i "pandas.RangeIndex PR07" \ + -i "pandas.RangeIndex.from_range PR01,SA01" \ + -i "pandas.RangeIndex.start SA01" \ + -i "pandas.RangeIndex.step SA01" \ + -i "pandas.RangeIndex.stop SA01" \ + -i "pandas.Series SA01" \ + -i "pandas.Series.T SA01" \ + -i "pandas.Series.__iter__ RT03,SA01" \ + -i "pandas.Series.add PR07" \ + -i "pandas.Series.at_time PR01" \ 
+ -i "pandas.Series.backfill PR01,SA01" \ + -i "pandas.Series.bfill SA01" \ + -i "pandas.Series.case_when RT03" \ + -i "pandas.Series.cat PR07,SA01" \ + -i "pandas.Series.cat.add_categories PR01,PR02" \ + -i "pandas.Series.cat.as_ordered PR01" \ + -i "pandas.Series.cat.as_unordered PR01" \ + -i "pandas.Series.cat.codes SA01" \ + -i "pandas.Series.cat.ordered SA01" \ + -i "pandas.Series.cat.remove_categories PR01,PR02" \ + -i "pandas.Series.cat.remove_unused_categories PR01" \ + -i "pandas.Series.cat.rename_categories PR01,PR02" \ + -i "pandas.Series.cat.reorder_categories PR01,PR02" \ + -i "pandas.Series.cat.set_categories PR01,PR02" \ + -i "pandas.Series.copy SA01" \ + -i "pandas.Series.div PR07" \ + -i "pandas.Series.droplevel SA01" \ + -i "pandas.Series.dt.as_unit PR01,PR02" \ + -i "pandas.Series.dt.ceil PR01,PR02,SA01" \ + -i "pandas.Series.dt.components SA01" \ + -i "pandas.Series.dt.date SA01" \ + -i "pandas.Series.dt.day SA01" \ + -i "pandas.Series.dt.day_name PR01,PR02,SA01" \ + -i "pandas.Series.dt.day_of_year SA01" \ + -i "pandas.Series.dt.dayofyear SA01" \ + -i "pandas.Series.dt.days SA01" \ + -i "pandas.Series.dt.days_in_month SA01" \ + -i "pandas.Series.dt.daysinmonth SA01" \ + -i "pandas.Series.dt.floor PR01,PR02,SA01" \ + -i "pandas.Series.dt.freq GL08" \ + -i "pandas.Series.dt.hour SA01" \ + -i "pandas.Series.dt.is_leap_year SA01" \ + -i "pandas.Series.dt.microsecond SA01" \ + -i "pandas.Series.dt.microseconds SA01" \ + -i "pandas.Series.dt.minute SA01" \ + -i "pandas.Series.dt.month SA01" \ + -i "pandas.Series.dt.month_name PR01,PR02,SA01" \ + -i "pandas.Series.dt.nanosecond SA01" \ + -i "pandas.Series.dt.nanoseconds SA01" \ + -i "pandas.Series.dt.normalize PR01" \ + -i "pandas.Series.dt.quarter SA01" \ + -i "pandas.Series.dt.qyear GL08" \ + -i "pandas.Series.dt.round PR01,PR02,SA01" \ + -i "pandas.Series.dt.second SA01" \ + -i "pandas.Series.dt.seconds SA01" \ + -i "pandas.Series.dt.strftime PR01,PR02" \ + -i "pandas.Series.dt.time SA01" \ + -i 
"pandas.Series.dt.timetz SA01" \ + -i "pandas.Series.dt.to_period PR01,PR02,RT03" \ + -i "pandas.Series.dt.total_seconds PR01" \ + -i "pandas.Series.dt.tz SA01" \ + -i "pandas.Series.dt.tz_convert PR01,PR02,RT03" \ + -i "pandas.Series.dt.tz_localize PR01,PR02" \ + -i "pandas.Series.dt.unit GL08" \ + -i "pandas.Series.dt.year SA01" \ + -i "pandas.Series.dtype SA01" \ + -i "pandas.Series.dtypes SA01" \ + -i "pandas.Series.empty GL08" \ + -i "pandas.Series.eq PR07,SA01" \ + -i "pandas.Series.ffill SA01" \ + -i "pandas.Series.first_valid_index SA01" \ + -i "pandas.Series.floordiv PR07" \ + -i "pandas.Series.ge PR07,SA01" \ + -i "pandas.Series.get SA01" \ + -i "pandas.Series.gt PR07,SA01" \ + -i "pandas.Series.hasnans SA01" \ + -i "pandas.Series.infer_objects RT03" \ + -i "pandas.Series.is_monotonic_decreasing SA01" \ + -i "pandas.Series.is_monotonic_increasing SA01" \ + -i "pandas.Series.is_unique SA01" \ + -i "pandas.Series.item SA01" \ + -i "pandas.Series.keys SA01" \ + -i "pandas.Series.kurt RT03,SA01" \ + -i "pandas.Series.kurtosis RT03,SA01" \ + -i "pandas.Series.last_valid_index SA01" \ + -i "pandas.Series.le PR07,SA01" \ + -i "pandas.Series.list.__getitem__ SA01" \ + -i "pandas.Series.list.flatten SA01" \ + -i "pandas.Series.list.len SA01" \ + -i "pandas.Series.lt PR07,SA01" \ + -i "pandas.Series.mask RT03" \ + -i "pandas.Series.max RT03" \ + -i "pandas.Series.mean RT03,SA01" \ + -i "pandas.Series.median RT03,SA01" \ + -i "pandas.Series.min RT03" \ + -i "pandas.Series.mod PR07" \ + -i "pandas.Series.mode SA01" \ + -i "pandas.Series.mul PR07" \ + -i "pandas.Series.nbytes SA01" \ + -i "pandas.Series.ndim SA01" \ + -i "pandas.Series.ne PR07,SA01" \ + -i "pandas.Series.nunique RT03" \ + -i "pandas.Series.pad PR01,SA01" \ + -i "pandas.Series.plot PR02,SA01" \ + -i "pandas.Series.pop RT03,SA01" \ + -i "pandas.Series.pow PR07" \ + -i "pandas.Series.prod RT03" \ + -i "pandas.Series.product RT03" \ + -i "pandas.Series.radd PR07" \ + -i "pandas.Series.rdiv PR07" \ + -i 
"pandas.Series.reorder_levels RT03,SA01" \ + -i "pandas.Series.rfloordiv PR07" \ + -i "pandas.Series.rmod PR07" \ + -i "pandas.Series.rmul PR07" \ + -i "pandas.Series.rpow PR07" \ + -i "pandas.Series.rsub PR07" \ + -i "pandas.Series.rtruediv PR07" \ + -i "pandas.Series.sem PR01,RT03,SA01" \ + -i "pandas.Series.shape SA01" \ + -i "pandas.Series.size SA01" \ + -i "pandas.Series.skew RT03,SA01" \ + -i "pandas.Series.sparse PR01,SA01" \ + -i "pandas.Series.sparse.density SA01" \ + -i "pandas.Series.sparse.fill_value SA01" \ + -i "pandas.Series.sparse.from_coo PR07,SA01" \ + -i "pandas.Series.sparse.npoints SA01" \ + -i "pandas.Series.sparse.sp_values SA01" \ + -i "pandas.Series.sparse.to_coo PR07,RT03,SA01" \ + -i "pandas.Series.std PR01,RT03,SA01" \ + -i "pandas.Series.str PR01,SA01" \ + -i "pandas.Series.str.capitalize RT03" \ + -i "pandas.Series.str.casefold RT03" \ + -i "pandas.Series.str.center RT03,SA01" \ + -i "pandas.Series.str.decode PR07,RT03,SA01" \ + -i "pandas.Series.str.encode PR07,RT03,SA01" \ + -i "pandas.Series.str.find RT03" \ + -i "pandas.Series.str.fullmatch RT03" \ + -i "pandas.Series.str.get RT03,SA01" \ + -i "pandas.Series.str.index RT03" \ + -i "pandas.Series.str.ljust RT03,SA01" \ + -i "pandas.Series.str.lower RT03" \ + -i "pandas.Series.str.lstrip RT03" \ + -i "pandas.Series.str.match RT03" \ + -i "pandas.Series.str.normalize RT03,SA01" \ + -i "pandas.Series.str.partition RT03" \ + -i "pandas.Series.str.repeat SA01" \ + -i "pandas.Series.str.replace SA01" \ + -i "pandas.Series.str.rfind RT03" \ + -i "pandas.Series.str.rindex RT03" \ + -i "pandas.Series.str.rjust RT03,SA01" \ + -i "pandas.Series.str.rpartition RT03" \ + -i "pandas.Series.str.rstrip RT03" \ + -i "pandas.Series.str.strip RT03" \ + -i "pandas.Series.str.swapcase RT03" \ + -i "pandas.Series.str.title RT03" \ + -i "pandas.Series.str.translate RT03,SA01" \ + -i "pandas.Series.str.upper RT03" \ + -i "pandas.Series.str.wrap RT03,SA01" \ + -i "pandas.Series.str.zfill RT03" \ + -i 
"pandas.Series.struct.dtypes SA01" \ + -i "pandas.Series.sub PR07" \ + -i "pandas.Series.sum RT03" \ + -i "pandas.Series.swaplevel SA01" \ + -i "pandas.Series.to_dict SA01" \ + -i "pandas.Series.to_frame SA01" \ + -i "pandas.Series.to_list RT03" \ + -i "pandas.Series.to_markdown SA01" \ + -i "pandas.Series.to_period SA01" \ + -i "pandas.Series.to_string SA01" \ + -i "pandas.Series.to_timestamp RT03,SA01" \ + -i "pandas.Series.truediv PR07" \ + -i "pandas.Series.tz_convert SA01" \ + -i "pandas.Series.tz_localize SA01" \ + -i "pandas.Series.unstack SA01" \ + -i "pandas.Series.update PR07,SA01" \ + -i "pandas.Series.value_counts RT03" \ + -i "pandas.Series.var PR01,RT03,SA01" \ + -i "pandas.Series.where RT03" \ + -i "pandas.SparseDtype SA01" \ + -i "pandas.Timedelta PR07,SA01" \ + -i "pandas.Timedelta.as_unit SA01" \ + -i "pandas.Timedelta.asm8 SA01" \ + -i "pandas.Timedelta.ceil SA01" \ + -i "pandas.Timedelta.components SA01" \ + -i "pandas.Timedelta.days SA01" \ + -i "pandas.Timedelta.floor SA01" \ + -i "pandas.Timedelta.max PR02,PR07,SA01" \ + -i "pandas.Timedelta.min PR02,PR07,SA01" \ + -i "pandas.Timedelta.resolution PR02,PR07,SA01" \ + -i "pandas.Timedelta.round SA01" \ + -i "pandas.Timedelta.to_numpy PR01" \ + -i "pandas.Timedelta.to_timedelta64 SA01" \ + -i "pandas.Timedelta.total_seconds SA01" \ + -i "pandas.Timedelta.view SA01" \ + -i "pandas.TimedeltaIndex PR01" \ + -i "pandas.TimedeltaIndex.as_unit RT03,SA01" \ + -i "pandas.TimedeltaIndex.ceil SA01" \ + -i "pandas.TimedeltaIndex.components SA01" \ + -i "pandas.TimedeltaIndex.days SA01" \ + -i "pandas.TimedeltaIndex.floor SA01" \ + -i "pandas.TimedeltaIndex.inferred_freq SA01" \ + -i "pandas.TimedeltaIndex.microseconds SA01" \ + -i "pandas.TimedeltaIndex.nanoseconds SA01" \ + -i "pandas.TimedeltaIndex.round SA01" \ + -i "pandas.TimedeltaIndex.seconds SA01" \ + -i "pandas.TimedeltaIndex.to_pytimedelta RT03,SA01" \ + -i "pandas.Timestamp PR07,SA01" \ + -i "pandas.Timestamp.as_unit SA01" \ + -i 
"pandas.Timestamp.asm8 SA01" \ + -i "pandas.Timestamp.astimezone SA01" \ + -i "pandas.Timestamp.ceil SA01" \ + -i "pandas.Timestamp.combine PR01,SA01" \ + -i "pandas.Timestamp.ctime SA01" \ + -i "pandas.Timestamp.date SA01" \ + -i "pandas.Timestamp.day GL08" \ + -i "pandas.Timestamp.day_name SA01" \ + -i "pandas.Timestamp.day_of_week SA01" \ + -i "pandas.Timestamp.day_of_year SA01" \ + -i "pandas.Timestamp.dayofweek SA01" \ + -i "pandas.Timestamp.dayofyear SA01" \ + -i "pandas.Timestamp.days_in_month SA01" \ + -i "pandas.Timestamp.daysinmonth SA01" \ + -i "pandas.Timestamp.dst SA01" \ + -i "pandas.Timestamp.floor SA01" \ + -i "pandas.Timestamp.fold GL08" \ + -i "pandas.Timestamp.fromordinal SA01" \ + -i "pandas.Timestamp.fromtimestamp PR01,SA01" \ + -i "pandas.Timestamp.hour GL08" \ + -i "pandas.Timestamp.is_leap_year SA01" \ + -i "pandas.Timestamp.isocalendar SA01" \ + -i "pandas.Timestamp.isoformat SA01" \ + -i "pandas.Timestamp.isoweekday SA01" \ + -i "pandas.Timestamp.max PR02,PR07,SA01" \ + -i "pandas.Timestamp.microsecond GL08" \ + -i "pandas.Timestamp.min PR02,PR07,SA01" \ + -i "pandas.Timestamp.minute GL08" \ + -i "pandas.Timestamp.month GL08" \ + -i "pandas.Timestamp.month_name SA01" \ + -i "pandas.Timestamp.nanosecond GL08" \ + -i "pandas.Timestamp.normalize SA01" \ + -i "pandas.Timestamp.now SA01" \ + -i "pandas.Timestamp.quarter SA01" \ + -i "pandas.Timestamp.replace PR07,SA01" \ + -i "pandas.Timestamp.resolution PR02,PR07,SA01" \ + -i "pandas.Timestamp.round SA01" \ + -i "pandas.Timestamp.second GL08" \ + -i "pandas.Timestamp.strftime SA01" \ + -i "pandas.Timestamp.strptime PR01,SA01" \ + -i "pandas.Timestamp.time SA01" \ + -i "pandas.Timestamp.timestamp SA01" \ + -i "pandas.Timestamp.timetuple SA01" \ + -i "pandas.Timestamp.timetz SA01" \ + -i "pandas.Timestamp.to_datetime64 SA01" \ + -i "pandas.Timestamp.to_julian_date SA01" \ + -i "pandas.Timestamp.to_numpy PR01" \ + -i "pandas.Timestamp.to_period PR01,SA01" \ + -i "pandas.Timestamp.to_pydatetime 
PR01,SA01" \ + -i "pandas.Timestamp.today SA01" \ + -i "pandas.Timestamp.toordinal SA01" \ + -i "pandas.Timestamp.tz SA01" \ + -i "pandas.Timestamp.tz_convert SA01" \ + -i "pandas.Timestamp.tz_localize SA01" \ + -i "pandas.Timestamp.tzinfo GL08" \ + -i "pandas.Timestamp.tzname SA01" \ + -i "pandas.Timestamp.unit SA01" \ + -i "pandas.Timestamp.utcfromtimestamp PR01,SA01" \ + -i "pandas.Timestamp.utcnow SA01" \ + -i "pandas.Timestamp.utcoffset SA01" \ + -i "pandas.Timestamp.utctimetuple SA01" \ + -i "pandas.Timestamp.value GL08" \ + -i "pandas.Timestamp.week SA01" \ + -i "pandas.Timestamp.weekday SA01" \ + -i "pandas.Timestamp.weekofyear SA01" \ + -i "pandas.Timestamp.year GL08" \ + -i "pandas.UInt16Dtype SA01" \ + -i "pandas.UInt32Dtype SA01" \ + -i "pandas.UInt64Dtype SA01" \ + -i "pandas.UInt8Dtype SA01" \ + -i "pandas.api.extensions.ExtensionArray SA01" \ + -i "pandas.api.extensions.ExtensionArray._accumulate RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray._concat_same_type PR07,SA01" \ + -i "pandas.api.extensions.ExtensionArray._formatter SA01" \ + -i "pandas.api.extensions.ExtensionArray._from_sequence SA01" \ + -i "pandas.api.extensions.ExtensionArray._from_sequence_of_strings SA01" \ + -i "pandas.api.extensions.ExtensionArray._hash_pandas_object RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray._pad_or_backfill PR01,RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray._reduce RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray._values_for_factorize SA01" \ + -i "pandas.api.extensions.ExtensionArray.astype SA01" \ + -i "pandas.api.extensions.ExtensionArray.copy RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.dropna RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.dtype SA01" \ + -i "pandas.api.extensions.ExtensionArray.duplicated RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.equals SA01" \ + -i "pandas.api.extensions.ExtensionArray.fillna SA01" \ + -i "pandas.api.extensions.ExtensionArray.insert PR07,RT03,SA01" \ + 
-i "pandas.api.extensions.ExtensionArray.interpolate PR01,SA01" \ + -i "pandas.api.extensions.ExtensionArray.isin PR07,RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.isna SA01" \ + -i "pandas.api.extensions.ExtensionArray.nbytes SA01" \ + -i "pandas.api.extensions.ExtensionArray.ndim SA01" \ + -i "pandas.api.extensions.ExtensionArray.ravel RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.shape SA01" \ + -i "pandas.api.extensions.ExtensionArray.shift SA01" \ + -i "pandas.api.extensions.ExtensionArray.take RT03" \ + -i "pandas.api.extensions.ExtensionArray.tolist RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.unique RT03,SA01" \ + -i "pandas.api.extensions.ExtensionArray.view SA01" \ + -i "pandas.api.extensions.register_extension_dtype SA01" \ + -i "pandas.api.indexers.BaseIndexer PR01,SA01" \ + -i "pandas.api.indexers.FixedForwardWindowIndexer PR01,SA01" \ + -i "pandas.api.indexers.VariableOffsetWindowIndexer PR01,SA01" \ + -i "pandas.api.interchange.from_dataframe RT03,SA01" \ + -i "pandas.api.types.infer_dtype PR07,SA01" \ + -i "pandas.api.types.is_any_real_numeric_dtype SA01" \ + -i "pandas.api.types.is_bool PR01,SA01" \ + -i "pandas.api.types.is_bool_dtype SA01" \ + -i "pandas.api.types.is_categorical_dtype SA01" \ + -i "pandas.api.types.is_complex PR01,SA01" \ + -i "pandas.api.types.is_complex_dtype SA01" \ + -i "pandas.api.types.is_datetime64_any_dtype SA01" \ + -i "pandas.api.types.is_datetime64_dtype SA01" \ + -i "pandas.api.types.is_datetime64_ns_dtype SA01" \ + -i "pandas.api.types.is_datetime64tz_dtype SA01" \ + -i "pandas.api.types.is_dict_like PR07,SA01" \ + -i "pandas.api.types.is_extension_array_dtype SA01" \ + -i "pandas.api.types.is_file_like PR07,SA01" \ + -i "pandas.api.types.is_float PR01,SA01" \ + -i "pandas.api.types.is_float_dtype SA01" \ + -i "pandas.api.types.is_hashable PR01,RT03,SA01" \ + -i "pandas.api.types.is_int64_dtype SA01" \ + -i "pandas.api.types.is_integer PR01,SA01" \ + -i 
"pandas.api.types.is_integer_dtype SA01" \ + -i "pandas.api.types.is_interval_dtype SA01" \ + -i "pandas.api.types.is_iterator PR07,SA01" \ + -i "pandas.api.types.is_list_like SA01" \ + -i "pandas.api.types.is_named_tuple PR07,SA01" \ + -i "pandas.api.types.is_numeric_dtype SA01" \ + -i "pandas.api.types.is_object_dtype SA01" \ + -i "pandas.api.types.is_period_dtype SA01" \ + -i "pandas.api.types.is_re PR07,SA01" \ + -i "pandas.api.types.is_re_compilable PR07,SA01" \ + -i "pandas.api.types.is_scalar SA01" \ + -i "pandas.api.types.is_signed_integer_dtype SA01" \ + -i "pandas.api.types.is_sparse SA01" \ + -i "pandas.api.types.is_string_dtype SA01" \ + -i "pandas.api.types.is_timedelta64_dtype SA01" \ + -i "pandas.api.types.is_timedelta64_ns_dtype SA01" \ + -i "pandas.api.types.is_unsigned_integer_dtype SA01" \ + -i "pandas.api.types.pandas_dtype PR07,RT03,SA01" \ + -i "pandas.api.types.union_categoricals RT03,SA01" \ + -i "pandas.arrays.ArrowExtensionArray PR07,SA01" \ + -i "pandas.arrays.BooleanArray SA01" \ + -i "pandas.arrays.DatetimeArray SA01" \ + -i "pandas.arrays.FloatingArray SA01" \ + -i "pandas.arrays.IntegerArray SA01" \ + -i "pandas.arrays.IntervalArray.closed SA01" \ + -i "pandas.arrays.IntervalArray.contains RT03" \ + -i "pandas.arrays.IntervalArray.is_non_overlapping_monotonic SA01" \ + -i "pandas.arrays.IntervalArray.left SA01" \ + -i "pandas.arrays.IntervalArray.length SA01" \ + -i "pandas.arrays.IntervalArray.mid SA01" \ + -i "pandas.arrays.IntervalArray.right SA01" \ + -i "pandas.arrays.IntervalArray.set_closed RT03,SA01" \ + -i "pandas.arrays.IntervalArray.to_tuples RT03,SA01" \ + -i "pandas.arrays.NumpyExtensionArray SA01" \ + -i "pandas.arrays.SparseArray PR07,SA01" \ + -i "pandas.arrays.TimedeltaArray PR07,SA01" \ + -i "pandas.bdate_range RT03,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.__iter__ RT03,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.agg RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.aggregate RT03" \ + -i 
"pandas.core.groupby.DataFrameGroupBy.apply RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.cummax RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.cummin RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.cumprod RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.cumsum RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.filter RT03,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.groups SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.hist RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.indices SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.max SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.mean RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.median SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.min SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.nth PR02" \ + -i "pandas.core.groupby.DataFrameGroupBy.nunique RT03,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.ohlc SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.plot PR02,SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.prod SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.rank RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.resample RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.sem SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.skew RT03" \ + -i "pandas.core.groupby.DataFrameGroupBy.sum SA01" \ + -i "pandas.core.groupby.DataFrameGroupBy.transform RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.__iter__ RT03,SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.agg RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.aggregate RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.apply RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.cummax RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.cummin RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.cumprod RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.cumsum RT03" \ + -i 
"pandas.core.groupby.SeriesGroupBy.filter PR01,RT03,SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.get_group RT03,SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.groups SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.indices SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.max SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.mean RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.median SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.min SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.nth PR02" \ + -i "pandas.core.groupby.SeriesGroupBy.ohlc SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.plot PR02,SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.prod SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.rank RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.resample RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.sem SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.skew RT03" \ + -i "pandas.core.groupby.SeriesGroupBy.sum SA01" \ + -i "pandas.core.groupby.SeriesGroupBy.transform RT03" \ + -i "pandas.core.resample.Resampler.__iter__ RT03,SA01" \ + -i "pandas.core.resample.Resampler.ffill RT03" \ + -i "pandas.core.resample.Resampler.get_group RT03,SA01" \ + -i "pandas.core.resample.Resampler.groups SA01" \ + -i "pandas.core.resample.Resampler.indices SA01" \ + -i "pandas.core.resample.Resampler.max PR01,RT03,SA01" \ + -i "pandas.core.resample.Resampler.mean SA01" \ + -i "pandas.core.resample.Resampler.median SA01" \ + -i "pandas.core.resample.Resampler.min PR01,RT03,SA01" \ + -i "pandas.core.resample.Resampler.ohlc SA01" \ + -i "pandas.core.resample.Resampler.prod SA01" \ + -i "pandas.core.resample.Resampler.quantile PR01,PR07" \ + -i "pandas.core.resample.Resampler.sem SA01" \ + -i "pandas.core.resample.Resampler.std SA01" \ + -i "pandas.core.resample.Resampler.sum SA01" \ + -i "pandas.core.resample.Resampler.transform PR01,RT03,SA01" \ + 
-i "pandas.core.resample.Resampler.var SA01" \ + -i "pandas.core.window.expanding.Expanding.corr PR01" \ + -i "pandas.core.window.expanding.Expanding.count PR01" \ + -i "pandas.core.window.rolling.Rolling.max PR01" \ + -i "pandas.core.window.rolling.Window.std PR01" \ + -i "pandas.core.window.rolling.Window.var PR01" \ + -i "pandas.date_range RT03" \ + -i "pandas.describe_option SA01" \ + -i "pandas.errors.AbstractMethodError PR01,SA01" \ + -i "pandas.errors.AttributeConflictWarning SA01" \ + -i "pandas.errors.CSSWarning SA01" \ + -i "pandas.errors.CategoricalConversionWarning SA01" \ + -i "pandas.errors.ChainedAssignmentError SA01" \ + -i "pandas.errors.ClosedFileError SA01" \ + -i "pandas.errors.DataError SA01" \ + -i "pandas.errors.DuplicateLabelError SA01" \ + -i "pandas.errors.EmptyDataError SA01" \ + -i "pandas.errors.IntCastingNaNError SA01" \ + -i "pandas.errors.InvalidIndexError SA01" \ + -i "pandas.errors.InvalidVersion SA01" \ + -i "pandas.errors.MergeError SA01" \ + -i "pandas.errors.NullFrequencyError SA01" \ + -i "pandas.errors.NumExprClobberingError SA01" \ + -i "pandas.errors.NumbaUtilError SA01" \ + -i "pandas.errors.OptionError SA01" \ + -i "pandas.errors.OutOfBoundsDatetime SA01" \ + -i "pandas.errors.OutOfBoundsTimedelta SA01" \ + -i "pandas.errors.PerformanceWarning SA01" \ + -i "pandas.errors.PossibleDataLossError SA01" \ + -i "pandas.errors.PossiblePrecisionLoss SA01" \ + -i "pandas.errors.SpecificationError SA01" \ + -i "pandas.errors.UndefinedVariableError PR01,SA01" \ + -i "pandas.errors.UnsortedIndexError SA01" \ + -i "pandas.errors.UnsupportedFunctionCall SA01" \ + -i "pandas.errors.ValueLabelTypeMismatch SA01" \ + -i "pandas.get_option SA01" \ + -i "pandas.infer_freq SA01" \ + -i "pandas.interval_range RT03" \ + -i "pandas.io.formats.style.Styler.apply RT03" \ + -i "pandas.io.formats.style.Styler.apply_index RT03" \ + -i "pandas.io.formats.style.Styler.background_gradient RT03" \ + -i "pandas.io.formats.style.Styler.bar RT03,SA01" \ + 
-i "pandas.io.formats.style.Styler.clear SA01" \ + -i "pandas.io.formats.style.Styler.concat RT03,SA01" \ + -i "pandas.io.formats.style.Styler.export RT03" \ + -i "pandas.io.formats.style.Styler.format RT03" \ + -i "pandas.io.formats.style.Styler.format_index RT03" \ + -i "pandas.io.formats.style.Styler.from_custom_template SA01" \ + -i "pandas.io.formats.style.Styler.hide RT03,SA01" \ + -i "pandas.io.formats.style.Styler.highlight_between RT03" \ + -i "pandas.io.formats.style.Styler.highlight_max RT03" \ + -i "pandas.io.formats.style.Styler.highlight_min RT03" \ + -i "pandas.io.formats.style.Styler.highlight_null RT03" \ + -i "pandas.io.formats.style.Styler.highlight_quantile RT03" \ + -i "pandas.io.formats.style.Styler.map RT03" \ + -i "pandas.io.formats.style.Styler.map_index RT03" \ + -i "pandas.io.formats.style.Styler.relabel_index RT03" \ + -i "pandas.io.formats.style.Styler.set_caption RT03,SA01" \ + -i "pandas.io.formats.style.Styler.set_properties RT03,SA01" \ + -i "pandas.io.formats.style.Styler.set_sticky RT03,SA01" \ + -i "pandas.io.formats.style.Styler.set_table_attributes PR07,RT03" \ + -i "pandas.io.formats.style.Styler.set_table_styles RT03" \ + -i "pandas.io.formats.style.Styler.set_td_classes RT03" \ + -i "pandas.io.formats.style.Styler.set_tooltips RT03,SA01" \ + -i "pandas.io.formats.style.Styler.set_uuid PR07,RT03,SA01" \ + -i "pandas.io.formats.style.Styler.text_gradient RT03" \ + -i "pandas.io.formats.style.Styler.to_excel PR01" \ + -i "pandas.io.formats.style.Styler.to_string SA01" \ + -i "pandas.io.formats.style.Styler.use RT03" \ + -i "pandas.io.json.build_table_schema PR07,RT03,SA01" \ + -i "pandas.io.stata.StataReader.data_label SA01" \ + -i "pandas.io.stata.StataReader.value_labels RT03,SA01" \ + -i "pandas.io.stata.StataReader.variable_labels RT03,SA01" \ + -i "pandas.io.stata.StataWriter.write_file SA01" \ + -i "pandas.json_normalize RT03,SA01" \ + -i "pandas.merge PR07" \ + -i "pandas.merge_asof PR07,RT03" \ + -i 
"pandas.merge_ordered PR07" \ + -i "pandas.option_context SA01" \ + -i "pandas.period_range RT03,SA01" \ + -i "pandas.pivot PR07" \ + -i "pandas.pivot_table PR07" \ + -i "pandas.plotting.andrews_curves RT03,SA01" \ + -i "pandas.plotting.autocorrelation_plot RT03,SA01" \ + -i "pandas.plotting.lag_plot RT03,SA01" \ + -i "pandas.plotting.parallel_coordinates PR07,RT03,SA01" \ + -i "pandas.plotting.plot_params SA01" \ + -i "pandas.plotting.scatter_matrix PR07,SA01" \ + -i "pandas.plotting.table PR07,RT03,SA01" \ + -i "pandas.qcut PR07,SA01" \ + -i "pandas.read_feather SA01" \ + -i "pandas.read_orc SA01" \ + -i "pandas.read_sas SA01" \ + -i "pandas.read_spss SA01" \ + -i "pandas.reset_option SA01" \ + -i "pandas.set_eng_float_format RT03,SA01" \ + -i "pandas.set_option SA01" \ + -i "pandas.show_versions SA01" \ + -i "pandas.test SA01" \ + -i "pandas.testing.assert_extension_array_equal SA01" \ + -i "pandas.testing.assert_index_equal PR07,SA01" \ + -i "pandas.testing.assert_series_equal PR07,SA01" \ + -i "pandas.timedelta_range SA01" \ + -i "pandas.tseries.api.guess_datetime_format SA01" \ + -i "pandas.tseries.offsets.BDay PR02,SA01" \ + -i "pandas.tseries.offsets.BMonthBegin PR02" \ + -i "pandas.tseries.offsets.BMonthEnd PR02" \ + -i "pandas.tseries.offsets.BQuarterBegin PR02" \ + -i "pandas.tseries.offsets.BQuarterBegin.copy SA01" \ + -i "pandas.tseries.offsets.BQuarterBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.BQuarterBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BQuarterBegin.kwds SA01" \ + -i "pandas.tseries.offsets.BQuarterBegin.n GL08" \ + -i "pandas.tseries.offsets.BQuarterBegin.name SA01" \ + -i "pandas.tseries.offsets.BQuarterBegin.nanos GL08" \ + -i "pandas.tseries.offsets.BQuarterBegin.normalize GL08" \ + -i "pandas.tseries.offsets.BQuarterBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.BQuarterBegin.startingMonth GL08" \ + -i "pandas.tseries.offsets.BQuarterEnd PR02" \ + -i "pandas.tseries.offsets.BQuarterEnd.copy SA01" \ + -i 
"pandas.tseries.offsets.BQuarterEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.BQuarterEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BQuarterEnd.kwds SA01" \ + -i "pandas.tseries.offsets.BQuarterEnd.n GL08" \ + -i "pandas.tseries.offsets.BQuarterEnd.name SA01" \ + -i "pandas.tseries.offsets.BQuarterEnd.nanos GL08" \ + -i "pandas.tseries.offsets.BQuarterEnd.normalize GL08" \ + -i "pandas.tseries.offsets.BQuarterEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.BQuarterEnd.startingMonth GL08" \ + -i "pandas.tseries.offsets.BYearBegin PR02" \ + -i "pandas.tseries.offsets.BYearBegin.copy SA01" \ + -i "pandas.tseries.offsets.BYearBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.BYearBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BYearBegin.kwds SA01" \ + -i "pandas.tseries.offsets.BYearBegin.month GL08" \ + -i "pandas.tseries.offsets.BYearBegin.n GL08" \ + -i "pandas.tseries.offsets.BYearBegin.name SA01" \ + -i "pandas.tseries.offsets.BYearBegin.nanos GL08" \ + -i "pandas.tseries.offsets.BYearBegin.normalize GL08" \ + -i "pandas.tseries.offsets.BYearBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.BYearEnd PR02" \ + -i "pandas.tseries.offsets.BYearEnd.copy SA01" \ + -i "pandas.tseries.offsets.BYearEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.BYearEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BYearEnd.kwds SA01" \ + -i "pandas.tseries.offsets.BYearEnd.month GL08" \ + -i "pandas.tseries.offsets.BYearEnd.n GL08" \ + -i "pandas.tseries.offsets.BYearEnd.name SA01" \ + -i "pandas.tseries.offsets.BYearEnd.nanos GL08" \ + -i "pandas.tseries.offsets.BYearEnd.normalize GL08" \ + -i "pandas.tseries.offsets.BYearEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.BusinessDay PR02,SA01" \ + -i "pandas.tseries.offsets.BusinessDay.calendar GL08" \ + -i "pandas.tseries.offsets.BusinessDay.copy SA01" \ + -i "pandas.tseries.offsets.BusinessDay.freqstr SA01" \ + -i "pandas.tseries.offsets.BusinessDay.holidays GL08" \ + -i 
"pandas.tseries.offsets.BusinessDay.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BusinessDay.kwds SA01" \ + -i "pandas.tseries.offsets.BusinessDay.n GL08" \ + -i "pandas.tseries.offsets.BusinessDay.name SA01" \ + -i "pandas.tseries.offsets.BusinessDay.nanos GL08" \ + -i "pandas.tseries.offsets.BusinessDay.normalize GL08" \ + -i "pandas.tseries.offsets.BusinessDay.rule_code GL08" \ + -i "pandas.tseries.offsets.BusinessDay.weekmask GL08" \ + -i "pandas.tseries.offsets.BusinessHour PR02,SA01" \ + -i "pandas.tseries.offsets.BusinessHour.calendar GL08" \ + -i "pandas.tseries.offsets.BusinessHour.copy SA01" \ + -i "pandas.tseries.offsets.BusinessHour.end GL08" \ + -i "pandas.tseries.offsets.BusinessHour.freqstr SA01" \ + -i "pandas.tseries.offsets.BusinessHour.holidays GL08" \ + -i "pandas.tseries.offsets.BusinessHour.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BusinessHour.kwds SA01" \ + -i "pandas.tseries.offsets.BusinessHour.n GL08" \ + -i "pandas.tseries.offsets.BusinessHour.name SA01" \ + -i "pandas.tseries.offsets.BusinessHour.nanos GL08" \ + -i "pandas.tseries.offsets.BusinessHour.normalize GL08" \ + -i "pandas.tseries.offsets.BusinessHour.rule_code GL08" \ + -i "pandas.tseries.offsets.BusinessHour.start GL08" \ + -i "pandas.tseries.offsets.BusinessHour.weekmask GL08" \ + -i "pandas.tseries.offsets.BusinessMonthBegin PR02" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.copy SA01" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.kwds SA01" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.n GL08" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.name SA01" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.nanos GL08" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.normalize GL08" \ + -i "pandas.tseries.offsets.BusinessMonthBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.BusinessMonthEnd PR02" \ + -i 
"pandas.tseries.offsets.BusinessMonthEnd.copy SA01" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.kwds SA01" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.n GL08" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.name SA01" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.nanos GL08" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.normalize GL08" \ + -i "pandas.tseries.offsets.BusinessMonthEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.CBMonthBegin PR02" \ + -i "pandas.tseries.offsets.CBMonthEnd PR02" \ + -i "pandas.tseries.offsets.CDay PR02,SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay PR02,SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay.calendar GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.copy SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay.freqstr SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay.holidays GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.is_on_offset GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.kwds SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay.n GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.name SA01" \ + -i "pandas.tseries.offsets.CustomBusinessDay.nanos GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.normalize GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.rule_code GL08" \ + -i "pandas.tseries.offsets.CustomBusinessDay.weekmask GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour PR02,SA01" \ + -i "pandas.tseries.offsets.CustomBusinessHour.calendar GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.copy SA01" \ + -i "pandas.tseries.offsets.CustomBusinessHour.end GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.freqstr SA01" \ + -i "pandas.tseries.offsets.CustomBusinessHour.holidays GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.is_on_offset GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.kwds 
SA01" \ + -i "pandas.tseries.offsets.CustomBusinessHour.n GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.name SA01" \ + -i "pandas.tseries.offsets.CustomBusinessHour.nanos GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.normalize GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.rule_code GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.start GL08" \ + -i "pandas.tseries.offsets.CustomBusinessHour.weekmask GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin PR02" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.calendar GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.copy SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.holidays GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.is_on_offset SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.kwds SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.m_offset GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.n GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.name SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.nanos GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.normalize GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthBegin.weekmask GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd PR02" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.calendar GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.copy SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.holidays GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.is_on_offset SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.kwds SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.m_offset GL08" \ + -i 
"pandas.tseries.offsets.CustomBusinessMonthEnd.n GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.name SA01" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.nanos GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.normalize GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.CustomBusinessMonthEnd.weekmask GL08" \ + -i "pandas.tseries.offsets.DateOffset PR02" \ + -i "pandas.tseries.offsets.DateOffset.copy SA01" \ + -i "pandas.tseries.offsets.DateOffset.freqstr SA01" \ + -i "pandas.tseries.offsets.DateOffset.is_on_offset GL08" \ + -i "pandas.tseries.offsets.DateOffset.kwds SA01" \ + -i "pandas.tseries.offsets.DateOffset.n GL08" \ + -i "pandas.tseries.offsets.DateOffset.name SA01" \ + -i "pandas.tseries.offsets.DateOffset.nanos GL08" \ + -i "pandas.tseries.offsets.DateOffset.normalize GL08" \ + -i "pandas.tseries.offsets.DateOffset.rule_code GL08" \ + -i "pandas.tseries.offsets.Day PR02" \ + -i "pandas.tseries.offsets.Day.copy SA01" \ + -i "pandas.tseries.offsets.Day.delta GL08" \ + -i "pandas.tseries.offsets.Day.freqstr SA01" \ + -i "pandas.tseries.offsets.Day.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Day.kwds SA01" \ + -i "pandas.tseries.offsets.Day.n GL08" \ + -i "pandas.tseries.offsets.Day.name SA01" \ + -i "pandas.tseries.offsets.Day.nanos SA01" \ + -i "pandas.tseries.offsets.Day.normalize GL08" \ + -i "pandas.tseries.offsets.Day.rule_code GL08" \ + -i "pandas.tseries.offsets.Easter PR02" \ + -i "pandas.tseries.offsets.Easter.copy SA01" \ + -i "pandas.tseries.offsets.Easter.freqstr SA01" \ + -i "pandas.tseries.offsets.Easter.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Easter.kwds SA01" \ + -i "pandas.tseries.offsets.Easter.n GL08" \ + -i "pandas.tseries.offsets.Easter.name SA01" \ + -i "pandas.tseries.offsets.Easter.nanos GL08" \ + -i "pandas.tseries.offsets.Easter.normalize GL08" \ + -i "pandas.tseries.offsets.Easter.rule_code GL08" \ + -i 
"pandas.tseries.offsets.FY5253 PR02" \ + -i "pandas.tseries.offsets.FY5253.copy SA01" \ + -i "pandas.tseries.offsets.FY5253.freqstr SA01" \ + -i "pandas.tseries.offsets.FY5253.get_rule_code_suffix GL08" \ + -i "pandas.tseries.offsets.FY5253.get_year_end GL08" \ + -i "pandas.tseries.offsets.FY5253.is_on_offset GL08" \ + -i "pandas.tseries.offsets.FY5253.kwds SA01" \ + -i "pandas.tseries.offsets.FY5253.n GL08" \ + -i "pandas.tseries.offsets.FY5253.name SA01" \ + -i "pandas.tseries.offsets.FY5253.nanos GL08" \ + -i "pandas.tseries.offsets.FY5253.normalize GL08" \ + -i "pandas.tseries.offsets.FY5253.rule_code GL08" \ + -i "pandas.tseries.offsets.FY5253.startingMonth GL08" \ + -i "pandas.tseries.offsets.FY5253.variation GL08" \ + -i "pandas.tseries.offsets.FY5253.weekday GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter PR02" \ + -i "pandas.tseries.offsets.FY5253Quarter.copy SA01" \ + -i "pandas.tseries.offsets.FY5253Quarter.freqstr SA01" \ + -i "pandas.tseries.offsets.FY5253Quarter.get_rule_code_suffix GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.get_weeks GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.is_on_offset GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.kwds SA01" \ + -i "pandas.tseries.offsets.FY5253Quarter.n GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.name SA01" \ + -i "pandas.tseries.offsets.FY5253Quarter.nanos GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.normalize GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.qtr_with_extra_week GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.rule_code GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.startingMonth GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.variation GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.weekday GL08" \ + -i "pandas.tseries.offsets.FY5253Quarter.year_has_extra_week GL08" \ + -i "pandas.tseries.offsets.Hour PR02" \ + -i "pandas.tseries.offsets.Hour.copy SA01" \ + -i "pandas.tseries.offsets.Hour.delta GL08" \ + -i "pandas.tseries.offsets.Hour.freqstr 
SA01" \ + -i "pandas.tseries.offsets.Hour.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Hour.kwds SA01" \ + -i "pandas.tseries.offsets.Hour.n GL08" \ + -i "pandas.tseries.offsets.Hour.name SA01" \ + -i "pandas.tseries.offsets.Hour.nanos SA01" \ + -i "pandas.tseries.offsets.Hour.normalize GL08" \ + -i "pandas.tseries.offsets.Hour.rule_code GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth PR02,SA01" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.copy SA01" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.freqstr SA01" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.is_on_offset GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.kwds SA01" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.n GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.name SA01" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.nanos GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.normalize GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.rule_code GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.week GL08" \ + -i "pandas.tseries.offsets.LastWeekOfMonth.weekday GL08" \ + -i "pandas.tseries.offsets.Micro PR02" \ + -i "pandas.tseries.offsets.Micro.copy SA01" \ + -i "pandas.tseries.offsets.Micro.delta GL08" \ + -i "pandas.tseries.offsets.Micro.freqstr SA01" \ + -i "pandas.tseries.offsets.Micro.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Micro.kwds SA01" \ + -i "pandas.tseries.offsets.Micro.n GL08" \ + -i "pandas.tseries.offsets.Micro.name SA01" \ + -i "pandas.tseries.offsets.Micro.nanos SA01" \ + -i "pandas.tseries.offsets.Micro.normalize GL08" \ + -i "pandas.tseries.offsets.Micro.rule_code GL08" \ + -i "pandas.tseries.offsets.Milli PR02" \ + -i "pandas.tseries.offsets.Milli.copy SA01" \ + -i "pandas.tseries.offsets.Milli.delta GL08" \ + -i "pandas.tseries.offsets.Milli.freqstr SA01" \ + -i "pandas.tseries.offsets.Milli.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Milli.kwds SA01" \ + -i "pandas.tseries.offsets.Milli.n GL08" \ + -i "pandas.tseries.offsets.Milli.name SA01" 
\ + -i "pandas.tseries.offsets.Milli.nanos SA01" \ + -i "pandas.tseries.offsets.Milli.normalize GL08" \ + -i "pandas.tseries.offsets.Milli.rule_code GL08" \ + -i "pandas.tseries.offsets.Minute PR02" \ + -i "pandas.tseries.offsets.Minute.copy SA01" \ + -i "pandas.tseries.offsets.Minute.delta GL08" \ + -i "pandas.tseries.offsets.Minute.freqstr SA01" \ + -i "pandas.tseries.offsets.Minute.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Minute.kwds SA01" \ + -i "pandas.tseries.offsets.Minute.n GL08" \ + -i "pandas.tseries.offsets.Minute.name SA01" \ + -i "pandas.tseries.offsets.Minute.nanos SA01" \ + -i "pandas.tseries.offsets.Minute.normalize GL08" \ + -i "pandas.tseries.offsets.Minute.rule_code GL08" \ + -i "pandas.tseries.offsets.MonthBegin PR02" \ + -i "pandas.tseries.offsets.MonthBegin.copy SA01" \ + -i "pandas.tseries.offsets.MonthBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.MonthBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.MonthBegin.kwds SA01" \ + -i "pandas.tseries.offsets.MonthBegin.n GL08" \ + -i "pandas.tseries.offsets.MonthBegin.name SA01" \ + -i "pandas.tseries.offsets.MonthBegin.nanos GL08" \ + -i "pandas.tseries.offsets.MonthBegin.normalize GL08" \ + -i "pandas.tseries.offsets.MonthBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.MonthEnd PR02" \ + -i "pandas.tseries.offsets.MonthEnd.copy SA01" \ + -i "pandas.tseries.offsets.MonthEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.MonthEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.MonthEnd.kwds SA01" \ + -i "pandas.tseries.offsets.MonthEnd.n GL08" \ + -i "pandas.tseries.offsets.MonthEnd.name SA01" \ + -i "pandas.tseries.offsets.MonthEnd.nanos GL08" \ + -i "pandas.tseries.offsets.MonthEnd.normalize GL08" \ + -i "pandas.tseries.offsets.MonthEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.Nano PR02" \ + -i "pandas.tseries.offsets.Nano.copy SA01" \ + -i "pandas.tseries.offsets.Nano.delta GL08" \ + -i "pandas.tseries.offsets.Nano.freqstr SA01" \ + -i 
"pandas.tseries.offsets.Nano.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Nano.kwds SA01" \ + -i "pandas.tseries.offsets.Nano.n GL08" \ + -i "pandas.tseries.offsets.Nano.name SA01" \ + -i "pandas.tseries.offsets.Nano.nanos SA01" \ + -i "pandas.tseries.offsets.Nano.normalize GL08" \ + -i "pandas.tseries.offsets.Nano.rule_code GL08" \ + -i "pandas.tseries.offsets.QuarterBegin PR02" \ + -i "pandas.tseries.offsets.QuarterBegin.copy SA01" \ + -i "pandas.tseries.offsets.QuarterBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.QuarterBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.QuarterBegin.kwds SA01" \ + -i "pandas.tseries.offsets.QuarterBegin.n GL08" \ + -i "pandas.tseries.offsets.QuarterBegin.name SA01" \ + -i "pandas.tseries.offsets.QuarterBegin.nanos GL08" \ + -i "pandas.tseries.offsets.QuarterBegin.normalize GL08" \ + -i "pandas.tseries.offsets.QuarterBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.QuarterBegin.startingMonth GL08" \ + -i "pandas.tseries.offsets.QuarterEnd PR02" \ + -i "pandas.tseries.offsets.QuarterEnd.copy SA01" \ + -i "pandas.tseries.offsets.QuarterEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.QuarterEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.QuarterEnd.kwds SA01" \ + -i "pandas.tseries.offsets.QuarterEnd.n GL08" \ + -i "pandas.tseries.offsets.QuarterEnd.name SA01" \ + -i "pandas.tseries.offsets.QuarterEnd.nanos GL08" \ + -i "pandas.tseries.offsets.QuarterEnd.normalize GL08" \ + -i "pandas.tseries.offsets.QuarterEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.QuarterEnd.startingMonth GL08" \ + -i "pandas.tseries.offsets.Second PR02" \ + -i "pandas.tseries.offsets.Second.copy SA01" \ + -i "pandas.tseries.offsets.Second.delta GL08" \ + -i "pandas.tseries.offsets.Second.freqstr SA01" \ + -i "pandas.tseries.offsets.Second.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Second.kwds SA01" \ + -i "pandas.tseries.offsets.Second.n GL08" \ + -i "pandas.tseries.offsets.Second.name SA01" \ + -i 
"pandas.tseries.offsets.Second.nanos SA01" \ + -i "pandas.tseries.offsets.Second.normalize GL08" \ + -i "pandas.tseries.offsets.Second.rule_code GL08" \ + -i "pandas.tseries.offsets.SemiMonthBegin PR02,SA01" \ + -i "pandas.tseries.offsets.SemiMonthBegin.copy SA01" \ + -i "pandas.tseries.offsets.SemiMonthBegin.day_of_month GL08" \ + -i "pandas.tseries.offsets.SemiMonthBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.SemiMonthBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.SemiMonthBegin.kwds SA01" \ + -i "pandas.tseries.offsets.SemiMonthBegin.n GL08" \ + -i "pandas.tseries.offsets.SemiMonthBegin.name SA01" \ + -i "pandas.tseries.offsets.SemiMonthBegin.nanos GL08" \ + -i "pandas.tseries.offsets.SemiMonthBegin.normalize GL08" \ + -i "pandas.tseries.offsets.SemiMonthBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd PR02,SA01" \ + -i "pandas.tseries.offsets.SemiMonthEnd.copy SA01" \ + -i "pandas.tseries.offsets.SemiMonthEnd.day_of_month GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.SemiMonthEnd.is_on_offset GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd.kwds SA01" \ + -i "pandas.tseries.offsets.SemiMonthEnd.n GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd.name SA01" \ + -i "pandas.tseries.offsets.SemiMonthEnd.nanos GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd.normalize GL08" \ + -i "pandas.tseries.offsets.SemiMonthEnd.rule_code GL08" \ + -i "pandas.tseries.offsets.Tick GL08" \ + -i "pandas.tseries.offsets.Tick.copy SA01" \ + -i "pandas.tseries.offsets.Tick.delta GL08" \ + -i "pandas.tseries.offsets.Tick.freqstr SA01" \ + -i "pandas.tseries.offsets.Tick.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Tick.kwds SA01" \ + -i "pandas.tseries.offsets.Tick.n GL08" \ + -i "pandas.tseries.offsets.Tick.name SA01" \ + -i "pandas.tseries.offsets.Tick.nanos SA01" \ + -i "pandas.tseries.offsets.Tick.normalize GL08" \ + -i "pandas.tseries.offsets.Tick.rule_code GL08" \ + -i 
"pandas.tseries.offsets.Week PR02" \ + -i "pandas.tseries.offsets.Week.copy SA01" \ + -i "pandas.tseries.offsets.Week.freqstr SA01" \ + -i "pandas.tseries.offsets.Week.is_on_offset GL08" \ + -i "pandas.tseries.offsets.Week.kwds SA01" \ + -i "pandas.tseries.offsets.Week.n GL08" \ + -i "pandas.tseries.offsets.Week.name SA01" \ + -i "pandas.tseries.offsets.Week.nanos GL08" \ + -i "pandas.tseries.offsets.Week.normalize GL08" \ + -i "pandas.tseries.offsets.Week.rule_code GL08" \ + -i "pandas.tseries.offsets.Week.weekday GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth PR02,SA01" \ + -i "pandas.tseries.offsets.WeekOfMonth.copy SA01" \ + -i "pandas.tseries.offsets.WeekOfMonth.freqstr SA01" \ + -i "pandas.tseries.offsets.WeekOfMonth.is_on_offset GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.kwds SA01" \ + -i "pandas.tseries.offsets.WeekOfMonth.n GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.name SA01" \ + -i "pandas.tseries.offsets.WeekOfMonth.nanos GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.normalize GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.rule_code GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.week GL08" \ + -i "pandas.tseries.offsets.WeekOfMonth.weekday GL08" \ + -i "pandas.tseries.offsets.YearBegin PR02" \ + -i "pandas.tseries.offsets.YearBegin.copy SA01" \ + -i "pandas.tseries.offsets.YearBegin.freqstr SA01" \ + -i "pandas.tseries.offsets.YearBegin.is_on_offset GL08" \ + -i "pandas.tseries.offsets.YearBegin.kwds SA01" \ + -i "pandas.tseries.offsets.YearBegin.month GL08" \ + -i "pandas.tseries.offsets.YearBegin.n GL08" \ + -i "pandas.tseries.offsets.YearBegin.name SA01" \ + -i "pandas.tseries.offsets.YearBegin.nanos GL08" \ + -i "pandas.tseries.offsets.YearBegin.normalize GL08" \ + -i "pandas.tseries.offsets.YearBegin.rule_code GL08" \ + -i "pandas.tseries.offsets.YearEnd PR02" \ + -i "pandas.tseries.offsets.YearEnd.copy SA01" \ + -i "pandas.tseries.offsets.YearEnd.freqstr SA01" \ + -i "pandas.tseries.offsets.YearEnd.is_on_offset GL08" \ + -i 
"pandas.tseries.offsets.YearEnd.kwds SA01" \ + -i "pandas.tseries.offsets.YearEnd.month GL08" \ + -i "pandas.tseries.offsets.YearEnd.n GL08" \ + -i "pandas.tseries.offsets.YearEnd.name SA01" \ + -i "pandas.tseries.offsets.YearEnd.nanos GL08" \ + -i "pandas.tseries.offsets.YearEnd.normalize GL08" \ + -i "pandas.tseries.offsets.YearEnd.rule_code GL08" \ + -i "pandas.unique PR07" \ + -i "pandas.util.hash_array PR07,SA01" \ + -i "pandas.util.hash_pandas_object PR07,SA01" # There should be no backslash in the final line, please keep this comment in the last ignored function RET=$(($RET + $?)) ; echo $MSG "DONE" diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index 72d5c03ab724f..d2e92bb971888 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -259,7 +259,7 @@ def test_validate_all_ignore_errors(self, monkeypatch): output_format="default", prefix=None, ignore_deprecated=False, - ignore_errors={"*": {"ER03"}}, + ignore_errors={None: {"ER03"}}, ) # two functions * two not ignored errors assert exit_status == 2 * 2 @@ -269,7 +269,7 @@ def test_validate_all_ignore_errors(self, monkeypatch): prefix=None, ignore_deprecated=False, ignore_errors={ - "*": {"ER03"}, + None: {"ER03"}, "pandas.DataFrame.align": {"ER01"}, # ignoring an error that is not requested should be of no effect "pandas.Index.all": {"ER03"} @@ -399,7 +399,7 @@ def test_exit_status_for_main(self, monkeypatch) -> None: prefix=None, output_format="default", ignore_deprecated=False, - ignore_errors=None, + ignore_errors={}, ) assert exit_status == 3 @@ -429,7 +429,7 @@ def test_exit_status_errors_for_validate_all(self, monkeypatch) -> None: prefix=None, output_format="default", ignore_deprecated=False, - ignore_errors=None, + ignore_errors={}, ) assert exit_status == 5 @@ -447,7 +447,7 @@ def test_no_exit_status_noerrors_for_validate_all(self, monkeypatch) -> None: output_format="default", prefix=None, 
ignore_deprecated=False, - ignore_errors=None, + ignore_errors={}, ) assert exit_status == 0 @@ -471,7 +471,7 @@ def test_exit_status_for_validate_all_json(self, monkeypatch) -> None: output_format="json", prefix=None, ignore_deprecated=False, - ignore_errors=None, + ignore_errors={}, ) assert exit_status == 0 @@ -515,7 +515,7 @@ def test_errors_param_filters_errors(self, monkeypatch) -> None: output_format="default", prefix=None, ignore_deprecated=False, - ignore_errors={"*": {"ER02", "ER03"}}, + ignore_errors={None: {"ER02", "ER03"}}, ) assert exit_status == 3 @@ -524,6 +524,6 @@ def test_errors_param_filters_errors(self, monkeypatch) -> None: output_format="default", prefix=None, ignore_deprecated=False, - ignore_errors={"*": {"ER01", "ER02"}}, + ignore_errors={None: {"ER01", "ER02"}}, ) assert exit_status == 1 diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index 0057f97ffa211..55acfaac4d843 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -365,7 +365,7 @@ def print_validate_all_results( error_messages = dict(res["errors"]) actual_failures = set(error_messages) expected_failures = (ignore_errors.get(func_name, set()) - | ignore_errors.get("*", set())) + | ignore_errors.get(None, set())) for err_code in actual_failures - expected_failures: sys.stdout.write( f'{prefix}{res["file"]}:{res["file_line"]}:' @@ -383,7 +383,8 @@ def print_validate_all_results( return exit_status -def print_validate_one_results(func_name: str) -> int: +def print_validate_one_results(func_name: str, + ignore_errors: dict[str, set[str]]) -> int: def header(title, width=80, char="#") -> str: full_line = char * width side_len = (width - len(title) - 2) // 2 @@ -394,6 +395,9 @@ def header(title, width=80, char="#") -> str: result = pandas_validate(func_name) + result["errors"] = [(code, message) for code, message in result["errors"] + if code not in ignore_errors.get(None, set())] + sys.stderr.write(header(f"Docstring 
({func_name})")) sys.stderr.write(f"{result['docstring']}\n") @@ -415,9 +419,13 @@ def header(title, width=80, char="#") -> str: def _format_ignore_errors(raw_ignore_errors): ignore_errors = collections.defaultdict(set) if raw_ignore_errors: - for obj_name, error_codes in raw_ignore_errors: + for error_codes in raw_ignore_errors: + obj_name = None + if " " in error_codes: + obj_name, error_codes = error_codes.split(" ") + # function errors "pandas.Series PR01,SA01" - if obj_name != "*": + if obj_name: if obj_name in ignore_errors: raise ValueError( f"Object `{obj_name}` is present in more than one " @@ -433,7 +441,7 @@ def _format_ignore_errors(raw_ignore_errors): # global errors "PR02,ES01" else: - ignore_errors["*"].update(set(error_codes.split(","))) + ignore_errors[None].update(set(error_codes.split(","))) unknown_errors = ignore_errors["*"] - ALL_ERRORS if unknown_errors: @@ -462,7 +470,7 @@ def main( ignore_errors ) else: - return print_validate_one_results(func_name) + return print_validate_one_results(func_name, ignore_errors) if __name__ == "__main__": @@ -505,11 +513,10 @@ def main( "-i", default=None, action="append", - nargs=2, - metavar=("function", "error_codes"), - help="function for which comma separated list " - "of error codes should not be validated" - "(e.g. pandas.DataFrame.head PR01,SA01). " + help="comma-separated list of error codes " + "(e.g. 'PR02,SA01'), with optional object path " + "to ignore errors for a single object " + "(e.g. pandas.DataFrame.head PR02,SA01). " "Partial validation for more than one function" "can be achieved by repeating this parameter.", )
Follow up of #57879, to use `--ignore_errors PR02,SA01` instead of `--ignore_errors * PR02,SA01`. I also make the script ignore the specified errors when running for a single document (i.e. `validate_docstrings.py pandas.Series -i SA01` was reporting the SA01 error before this PR, and it won't with the changes here). CC: @mroeschke
https://api.github.com/repos/pandas-dev/pandas/pulls/57908
2024-03-19T01:56:50Z
2024-03-19T16:57:33Z
2024-03-19T16:57:33Z
2024-03-29T06:47:58Z
Backport PR #57905 on branch 2.2.x (Revert "Fix issue with Tempita recompilation (#57796)")
diff --git a/pandas/_libs/meson.build b/pandas/_libs/meson.build index 7621915ebcfdb..c27386743c6e9 100644 --- a/pandas/_libs/meson.build +++ b/pandas/_libs/meson.build @@ -54,37 +54,25 @@ _intervaltree_helper = custom_target('intervaltree_helper_pxi', py, tempita, '@INPUT@', '-o', '@OUTDIR@' ] ) - -_algos_pxi_dep = declare_dependency(sources: [_algos_take_helper, _algos_common_helper]) -_khash_pxi_dep = declare_dependency(sources: _khash_primitive_helper) -_hashtable_pxi_dep = declare_dependency( - sources: [_hashtable_class_helper, _hashtable_func_helper] -) -_index_pxi_dep = declare_dependency(sources: _index_class_helper) -_intervaltree_pxi_dep = declare_dependency(sources: _intervaltree_helper) -_sparse_pxi_dep = declare_dependency(sources: _sparse_op_helper) - +_khash_primitive_helper_dep = declare_dependency(sources: _khash_primitive_helper) subdir('tslibs') libs_sources = { # Dict of extension name -> dict of {sources, include_dirs, and deps} # numpy include dir is implicitly included - 'algos': {'sources': ['algos.pyx'], - 'deps': [_khash_pxi_dep, _algos_pxi_dep]}, + 'algos': {'sources': ['algos.pyx', _algos_common_helper, _algos_take_helper], 'deps': _khash_primitive_helper_dep}, 'arrays': {'sources': ['arrays.pyx']}, 'groupby': {'sources': ['groupby.pyx']}, 'hashing': {'sources': ['hashing.pyx']}, - 'hashtable': {'sources': ['hashtable.pyx'], - 'deps': [_khash_pxi_dep, _hashtable_pxi_dep]}, - 'index': {'sources': ['index.pyx'], - 'deps': [_khash_pxi_dep, _index_pxi_dep]}, + 'hashtable': {'sources': ['hashtable.pyx', _hashtable_class_helper, _hashtable_func_helper], 'deps': _khash_primitive_helper_dep}, + 'index': {'sources': ['index.pyx', _index_class_helper], 'deps': _khash_primitive_helper_dep}, 'indexing': {'sources': ['indexing.pyx']}, 'internals': {'sources': ['internals.pyx']}, - 'interval': {'sources': ['interval.pyx'], - 'deps': [_khash_pxi_dep, _intervaltree_pxi_dep]}, - 'join': {'sources': ['join.pyx'], - 'deps': [_khash_pxi_dep]}, + 
'interval': {'sources': ['interval.pyx', _intervaltree_helper], + 'deps': _khash_primitive_helper_dep}, + 'join': {'sources': ['join.pyx', _khash_primitive_helper], + 'deps': _khash_primitive_helper_dep}, 'lib': {'sources': ['lib.pyx', 'src/parser/tokenizer.c']}, 'missing': {'sources': ['missing.pyx']}, 'pandas_datetime': {'sources': ['src/vendored/numpy/datetime/np_datetime.c', @@ -95,7 +83,7 @@ libs_sources = { 'src/parser/io.c', 'src/parser/pd_parser.c']}, 'parsers': {'sources': ['parsers.pyx', 'src/parser/tokenizer.c', 'src/parser/io.c'], - 'deps': [_khash_pxi_dep]}, + 'deps': _khash_primitive_helper_dep}, 'json': {'sources': ['src/vendored/ujson/python/ujson.c', 'src/vendored/ujson/python/objToJSON.c', 'src/vendored/ujson/python/JSONtoObj.c', @@ -107,8 +95,7 @@ libs_sources = { 'reshape': {'sources': ['reshape.pyx']}, 'sas': {'sources': ['sas.pyx']}, 'byteswap': {'sources': ['byteswap.pyx']}, - 'sparse': {'sources': ['sparse.pyx'], - 'deps': [_sparse_pxi_dep]}, + 'sparse': {'sources': ['sparse.pyx', _sparse_op_helper]}, 'tslib': {'sources': ['tslib.pyx']}, 'testing': {'sources': ['testing.pyx']}, 'writers': {'sources': ['writers.pyx']}
Backport PR #57905: Revert "Fix issue with Tempita recompilation (#57796)"
https://api.github.com/repos/pandas-dev/pandas/pulls/57907
2024-03-19T00:14:29Z
2024-03-19T00:55:34Z
2024-03-19T00:55:34Z
2024-03-19T00:55:34Z
Revert "Fix issue with Tempita recompilation (#57796)"
diff --git a/pandas/_libs/meson.build b/pandas/_libs/meson.build index 7621915ebcfdb..c27386743c6e9 100644 --- a/pandas/_libs/meson.build +++ b/pandas/_libs/meson.build @@ -54,37 +54,25 @@ _intervaltree_helper = custom_target('intervaltree_helper_pxi', py, tempita, '@INPUT@', '-o', '@OUTDIR@' ] ) - -_algos_pxi_dep = declare_dependency(sources: [_algos_take_helper, _algos_common_helper]) -_khash_pxi_dep = declare_dependency(sources: _khash_primitive_helper) -_hashtable_pxi_dep = declare_dependency( - sources: [_hashtable_class_helper, _hashtable_func_helper] -) -_index_pxi_dep = declare_dependency(sources: _index_class_helper) -_intervaltree_pxi_dep = declare_dependency(sources: _intervaltree_helper) -_sparse_pxi_dep = declare_dependency(sources: _sparse_op_helper) - +_khash_primitive_helper_dep = declare_dependency(sources: _khash_primitive_helper) subdir('tslibs') libs_sources = { # Dict of extension name -> dict of {sources, include_dirs, and deps} # numpy include dir is implicitly included - 'algos': {'sources': ['algos.pyx'], - 'deps': [_khash_pxi_dep, _algos_pxi_dep]}, + 'algos': {'sources': ['algos.pyx', _algos_common_helper, _algos_take_helper], 'deps': _khash_primitive_helper_dep}, 'arrays': {'sources': ['arrays.pyx']}, 'groupby': {'sources': ['groupby.pyx']}, 'hashing': {'sources': ['hashing.pyx']}, - 'hashtable': {'sources': ['hashtable.pyx'], - 'deps': [_khash_pxi_dep, _hashtable_pxi_dep]}, - 'index': {'sources': ['index.pyx'], - 'deps': [_khash_pxi_dep, _index_pxi_dep]}, + 'hashtable': {'sources': ['hashtable.pyx', _hashtable_class_helper, _hashtable_func_helper], 'deps': _khash_primitive_helper_dep}, + 'index': {'sources': ['index.pyx', _index_class_helper], 'deps': _khash_primitive_helper_dep}, 'indexing': {'sources': ['indexing.pyx']}, 'internals': {'sources': ['internals.pyx']}, - 'interval': {'sources': ['interval.pyx'], - 'deps': [_khash_pxi_dep, _intervaltree_pxi_dep]}, - 'join': {'sources': ['join.pyx'], - 'deps': [_khash_pxi_dep]}, + 
'interval': {'sources': ['interval.pyx', _intervaltree_helper], + 'deps': _khash_primitive_helper_dep}, + 'join': {'sources': ['join.pyx', _khash_primitive_helper], + 'deps': _khash_primitive_helper_dep}, 'lib': {'sources': ['lib.pyx', 'src/parser/tokenizer.c']}, 'missing': {'sources': ['missing.pyx']}, 'pandas_datetime': {'sources': ['src/vendored/numpy/datetime/np_datetime.c', @@ -95,7 +83,7 @@ libs_sources = { 'src/parser/io.c', 'src/parser/pd_parser.c']}, 'parsers': {'sources': ['parsers.pyx', 'src/parser/tokenizer.c', 'src/parser/io.c'], - 'deps': [_khash_pxi_dep]}, + 'deps': _khash_primitive_helper_dep}, 'json': {'sources': ['src/vendored/ujson/python/ujson.c', 'src/vendored/ujson/python/objToJSON.c', 'src/vendored/ujson/python/JSONtoObj.c', @@ -107,8 +95,7 @@ libs_sources = { 'reshape': {'sources': ['reshape.pyx']}, 'sas': {'sources': ['sas.pyx']}, 'byteswap': {'sources': ['byteswap.pyx']}, - 'sparse': {'sources': ['sparse.pyx'], - 'deps': [_sparse_pxi_dep]}, + 'sparse': {'sources': ['sparse.pyx', _sparse_op_helper]}, 'tslib': {'sources': ['tslib.pyx']}, 'testing': {'sources': ['testing.pyx']}, 'writers': {'sources': ['writers.pyx']}
I don't think this had the intended effect - it did prevent unwanted recompilation but also ignored when the tempita files were updated and a recompilation _should_ occur
https://api.github.com/repos/pandas-dev/pandas/pulls/57905
2024-03-18T23:20:27Z
2024-03-19T00:14:21Z
2024-03-19T00:14:21Z
2024-03-19T00:14:28Z
Allow Dockerfile to use local requirements.txt
diff --git a/Dockerfile b/Dockerfile index c697f0c1c66c7..03f76f39b8cc7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,6 +9,6 @@ RUN apt-get install -y build-essential RUN apt-get install -y libhdf5-dev libgles2-mesa-dev RUN python -m pip install --upgrade pip -RUN python -m pip install \ - -r https://raw.githubusercontent.com/pandas-dev/pandas/main/requirements-dev.txt +COPY requirements-dev.txt /tmp +RUN python -m pip install -r /tmp/requirements-dev.txt CMD ["/bin/bash"]
null
https://api.github.com/repos/pandas-dev/pandas/pulls/57904
2024-03-18T23:18:47Z
2024-03-19T00:15:20Z
2024-03-19T00:15:20Z
2024-03-19T00:54:26Z
DOC: update link in benchmarks.md
diff --git a/web/pandas/community/benchmarks.md b/web/pandas/community/benchmarks.md index ffce00be96bca..1e63832a5a2ba 100644 --- a/web/pandas/community/benchmarks.md +++ b/web/pandas/community/benchmarks.md @@ -75,5 +75,5 @@ There is a quick summary here: The main benchmarks comparing dataframe tools that include pandas are: -- [H2O.ai benchmarks](https://h2oai.github.io/db-benchmark/) +- [DuckDB (former H2O.ai) benchmarks](https://duckdblabs.github.io/db-benchmark/) - [TPCH benchmarks](https://pola.rs/posts/benchmarks/)
- [x] closes #57850 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. The [H20.ai benchmark link](https://h2oai.github.io/db-benchmark/) in the [benchmarks webpage](https://pandas.pydata.org/community/benchmarks.html) contains the outdated benchmark results (e.g. the last benchmark was performed July 2021 and it used the pandas==1.2.5). DuckDB published a new benchmark results (e.g. the last benchmark was December 2023 with pandas==2.1.1) in the [link](https://duckdblabs.github.io/db-benchmark/) with the same approach described in [their blog](https://duckdb.org/2023/11/03/db-benchmark-update). So it's a good idea to replace the benchmark link. Thank you @rootsmusic for the issue.
https://api.github.com/repos/pandas-dev/pandas/pulls/57903
2024-03-18T22:50:37Z
2024-03-18T23:54:46Z
2024-03-18T23:54:46Z
2024-03-18T23:57:50Z
REF: Avoid new object creation when reverse slicing when possible
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ba2c936b75d9e..745774b34a3ad 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -2371,11 +2371,12 @@ def factorize( ): if self.freq is not None: # We must be unique, so can short-circuit (and retain freq) - codes = np.arange(len(self), dtype=np.intp) - uniques = self.copy() # TODO: copy or view? if sort and self.freq.n < 0: - codes = codes[::-1] - uniques = uniques[::-1] + codes = np.arange(len(self) - 1, -1, -1, dtype=np.intp) + uniques = self[::-1] + else: + codes = np.arange(len(self), dtype=np.intp) + uniques = self.copy() # TODO: copy or view? return codes, uniques if sort: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c2df773326dc9..1357ba4af52c4 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2116,7 +2116,7 @@ def droplevel(self, level: IndexLabel = 0): if not isinstance(level, (tuple, list)): level = [level] - levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] + levnums = sorted((self._get_level_number(lev) for lev in level), reverse=True) return self._drop_level_numbers(levnums) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 2cb05dadd5981..2e554bc848ffe 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3589,7 +3589,7 @@ def _reorder_indexer( new_order = key_order_map[self.codes[i][indexer]] elif isinstance(k, slice) and k.step is not None and k.step < 0: # flip order for negative step - new_order = np.arange(n)[::-1][indexer] + new_order = np.arange(n - 1, -1, -1)[indexer] elif isinstance(k, slice) and k.start is None and k.stop is None: # slice(None) should not determine order GH#31330 new_order = np.ones((n,), dtype=np.intp)[indexer] diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 82bf8d7c70c7e..84c426b4cfa77 100644 --- a/pandas/core/indexes/range.py +++ 
b/pandas/core/indexes/range.py @@ -65,6 +65,12 @@ _dtype_int64 = np.dtype(np.int64) +def min_fitting_element(start: int, step: int, lower_limit: int) -> int: + """Returns the smallest element greater than or equal to the limit""" + no_steps = -(-(lower_limit - start) // abs(step)) + return start + abs(step) * no_steps + + class RangeIndex(Index): """ Immutable Index implementing a monotonic integer range. @@ -586,25 +592,30 @@ def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: kwargs.pop("kind", None) # e.g. "mergesort" is irrelevant nv.validate_argsort(args, kwargs) + start, stop, step = None, None, None if self._range.step > 0: - result = np.arange(len(self), dtype=np.intp) + if ascending: + start = len(self) + else: + start, stop, step = len(self) - 1, -1, -1 + elif ascending: + start, stop, step = len(self) - 1, -1, -1 else: - result = np.arange(len(self) - 1, -1, -1, dtype=np.intp) + start = len(self) - if not ascending: - result = result[::-1] - return result + return np.arange(start, stop, step, dtype=np.intp) def factorize( self, sort: bool = False, use_na_sentinel: bool = True, ) -> tuple[npt.NDArray[np.intp], RangeIndex]: - codes = np.arange(len(self), dtype=np.intp) - uniques = self if sort and self.step < 0: - codes = codes[::-1] - uniques = uniques[::-1] + codes = np.arange(len(self) - 1, -1, -1, dtype=np.intp) + uniques = self[::-1] + else: + codes = np.arange(len(self), dtype=np.intp) + uniques = self return codes, uniques def equals(self, other: object) -> bool: @@ -715,26 +726,15 @@ def _intersection(self, other: Index, sort: bool = False): # intersection disregarding the lower bounds tmp_start = first.start + (second.start - first.start) * first.step // gcd * s new_step = first.step * second.step // gcd - new_range = range(tmp_start, int_high, new_step) - new_index = self._simple_new(new_range) # adjust index to limiting interval - new_start = new_index._min_fitting_element(int_low) - new_range = range(new_start, new_index.stop, 
new_index.step) - new_index = self._simple_new(new_range) + new_start = min_fitting_element(tmp_start, new_step, int_low) + new_range = range(new_start, int_high, new_step) - if (self.step < 0 and other.step < 0) is not (new_index.step < 0): - new_index = new_index[::-1] + if (self.step < 0 and other.step < 0) is not (new_range.step < 0): + new_range = new_range[::-1] - if sort is None: - new_index = new_index.sort_values() - - return new_index - - def _min_fitting_element(self, lower_limit: int) -> int: - """Returns the smallest element greater than or equal to the limit""" - no_steps = -(-(lower_limit - self.start) // abs(self.step)) - return self.start + abs(self.step) * no_steps + return self._simple_new(new_range) def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]: """ @@ -920,9 +920,9 @@ def _difference(self, other, sort=None): # e.g. range(10) and range(0, 10, 3) return super()._difference(other, sort=sort) - new_index = type(self)._simple_new(new_rng, name=res_name) if first is not self._range: - new_index = new_index[::-1] + new_rng = new_rng[::-1] + new_index = type(self)._simple_new(new_rng, name=res_name) return new_index diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index c7a938dbc4449..c8a2e11dce3d7 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1145,7 +1145,7 @@ def _contains_slice(x: object) -> bool: # GH#41369 Loop in reverse order ensures indexing along columns before rows # which selects only necessary blocks which avoids dtype conversion if possible axis = len(tup) - 1 - for key in tup[::-1]: + for key in reversed(tup): if com.is_null_slice(key): axis -= 1 continue diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index d920ebc60de8c..af851e1fc8224 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1549,9 +1549,9 @@ def _insert_update_blklocs_and_blknos(self, loc) -> None: self._blklocs = np.append(self._blklocs, 0) 
self._blknos = np.append(self._blknos, len(self.blocks)) elif loc == 0: - # np.append is a lot faster, let's use it if we can. - self._blklocs = np.append(self._blklocs[::-1], 0)[::-1] - self._blknos = np.append(self._blknos[::-1], len(self.blocks))[::-1] + # As of numpy 1.26.4, np.concatenate faster than np.append + self._blklocs = np.concatenate([[0], self._blklocs]) + self._blknos = np.concatenate([[len(self.blocks)], self._blknos]) else: new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos( self.blklocs, self.blknos, loc, len(self.blocks) diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index b28010c13d6dd..ff358e8ba346c 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -910,9 +910,10 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame: raise ValueError("Columns with duplicate values are not supported in stack") # If we need to drop `level` from columns, it needs to be in descending order + set_levels = set(level) drop_levnums = sorted(level, reverse=True) stack_cols = frame.columns._drop_level_numbers( - [k for k in range(frame.columns.nlevels) if k not in level][::-1] + [k for k in range(frame.columns.nlevels - 1, -1, -1) if k not in set_levels] ) if len(level) > 1: # Arrange columns in the order we want to take them, e.g. 
level=[2, 0, 1] @@ -936,7 +937,7 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame: idx = (idx,) gen = iter(idx) column_indexer = tuple( - next(gen) if k in level else slice(None) + next(gen) if k in set_levels else slice(None) for k in range(frame.columns.nlevels) ) data = frame.loc[:, column_indexer] diff --git a/pandas/core/series.py b/pandas/core/series.py index 8a7c1531205e0..08e56cb4925b3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5510,9 +5510,9 @@ def case_when( replacements = updated_replacements default = default.astype(common_dtype) - counter = reversed(range(len(conditions))) + counter = range(len(conditions) - 1, -1, -1) for position, condition, replacement in zip( - counter, conditions[::-1], replacements[::-1] + counter, reversed(conditions), reversed(replacements) ): try: default = default.mask( diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 4774b013fc428..493e856c6dcc6 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -2,6 +2,7 @@ from __future__ import annotations +import itertools from typing import ( TYPE_CHECKING, Callable, @@ -334,13 +335,15 @@ def lexsort_indexer( raise ValueError(f"invalid na_position: {na_position}") if isinstance(orders, bool): - orders = [orders] * len(keys) + orders = itertools.repeat(orders, len(keys)) elif orders is None: - orders = [True] * len(keys) + orders = itertools.repeat(True, len(keys)) + else: + orders = reversed(orders) labels = [] - for k, order in zip(keys, orders): + for k, order in zip(reversed(keys), orders): k = ensure_key_mapped(k, key) if codes_given: codes = cast(np.ndarray, k) @@ -361,7 +364,7 @@ def lexsort_indexer( labels.append(codes) - return np.lexsort(labels[::-1]) + return np.lexsort(labels) def nargsort( diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index 72762db21b0c5..c9ddbf4464b29 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ 
b/pandas/tests/indexes/ranges/test_range.py @@ -9,6 +9,7 @@ RangeIndex, ) import pandas._testing as tm +from pandas.core.indexes.range import min_fitting_element class TestRangeIndex: @@ -419,21 +420,21 @@ def test_extended_gcd(self, simple_index): assert 2 == result[0] def test_min_fitting_element(self): - result = RangeIndex(0, 20, 2)._min_fitting_element(1) + result = min_fitting_element(0, 2, 1) assert 2 == result - result = RangeIndex(1, 6)._min_fitting_element(1) + result = min_fitting_element(1, 1, 1) assert 1 == result - result = RangeIndex(18, -2, -2)._min_fitting_element(1) + result = min_fitting_element(18, -2, 1) assert 2 == result - result = RangeIndex(5, 0, -1)._min_fitting_element(1) + result = min_fitting_element(5, -1, 1) assert 1 == result big_num = 500000000000000000000000 - result = RangeIndex(5, big_num * 2, 1)._min_fitting_element(big_num) + result = min_fitting_element(5, 1, big_num) assert big_num == result def test_slice_specialised(self, simple_index): diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py index d417b8b743dc5..ac24ff828cb8f 100644 --- a/pandas/tests/indexes/ranges/test_setops.py +++ b/pandas/tests/indexes/ranges/test_setops.py @@ -93,12 +93,12 @@ def test_intersection(self, sort): # GH 17296: intersect two decreasing RangeIndexes first = RangeIndex(10, -2, -2) other = RangeIndex(5, -4, -1) - expected = first.astype(int).intersection(other.astype(int), sort=sort) - result = first.intersection(other, sort=sort).astype(int) + expected = RangeIndex(start=4, stop=-2, step=-2) + result = first.intersection(other, sort=sort) tm.assert_index_equal(result, expected) # reversed - result = other.intersection(first, sort=sort).astype(int) + result = other.intersection(first, sort=sort) tm.assert_index_equal(result, expected) index = RangeIndex(5, name="foo")
null
https://api.github.com/repos/pandas-dev/pandas/pulls/57902
2024-03-18T21:39:52Z
2024-03-20T16:41:53Z
2024-03-20T16:41:53Z
2024-03-20T16:41:56Z
DOC: Remove doc of deprecated week and weekofyear
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 0f38d90e18616..ecdfb3c565d33 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -797,8 +797,6 @@ There are several time/date properties that one can access from ``Timestamp`` or timetz,"Returns datetime.time as local time with timezone information" dayofyear,"The ordinal day of year" day_of_year,"The ordinal day of year" - weekofyear,"The week ordinal of the year" - week,"The week ordinal of the year" dayofweek,"The number of the day of the week with Monday=0, Sunday=6" day_of_week,"The number of the day of the week with Monday=0, Sunday=6" weekday,"The number of the day of the week with Monday=0, Sunday=6" @@ -812,6 +810,10 @@ There are several time/date properties that one can access from ``Timestamp`` or is_year_end,"Logical indicating if last day of year (defined by frequency)" is_leap_year,"Logical indicating if the date belongs to a leap year" +.. note:: + + You can use ``DatetimeIndex.isocalendar().week`` to access week of year date information. + Furthermore, if you have a ``Series`` with datetimelike values, then you can access these properties via the ``.dt`` accessor, as detailed in the section on :ref:`.dt accessors<basics.dt_accessors>`.
- [x] closes #57887 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57901
2024-03-18T21:22:24Z
2024-03-18T23:54:22Z
2024-03-18T23:54:22Z
2024-03-20T03:51:23Z
Backport PR #57889 on branch 2.2.x (BUG: Handle Series construction with Dask, dict-like, Series)
diff --git a/pandas/core/series.py b/pandas/core/series.py index 236085c2a62e1..c1782206d4b67 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -533,7 +533,7 @@ def __init__( data = data.reindex(index, copy=copy) copy = False data = data._mgr - elif is_dict_like(data): + elif isinstance(data, Mapping): data, index = self._init_dict(data, index, dtype) dtype = None copy = False @@ -605,7 +605,7 @@ def __init__( ) def _init_dict( - self, data, index: Index | None = None, dtype: DtypeObj | None = None + self, data: Mapping, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a
Backport PR #57889: BUG: Handle Series construction with Dask, dict-like, Series
https://api.github.com/repos/pandas-dev/pandas/pulls/57899
2024-03-18T20:38:30Z
2024-03-18T22:01:56Z
2024-03-18T22:01:56Z
2024-03-18T22:01:56Z
Backport PR #57892 on branch 2.2.x (CI: xfail Pyarrow slicing test)
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 738442fab8c70..eb890c8b8c0ab 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -30,6 +30,7 @@ pa_version_under13p0, pa_version_under14p0, pa_version_under14p1, + pa_version_under16p0, ) if TYPE_CHECKING: @@ -186,6 +187,7 @@ def get_bz2_file() -> type[pandas.compat.compressors.BZ2File]: "pa_version_under13p0", "pa_version_under14p0", "pa_version_under14p1", + "pa_version_under16p0", "IS64", "ISMUSL", "PY310", diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py index beb4814914101..a2dfa69bbf236 100644 --- a/pandas/compat/pyarrow.py +++ b/pandas/compat/pyarrow.py @@ -15,6 +15,7 @@ pa_version_under14p0 = _palv < Version("14.0.0") pa_version_under14p1 = _palv < Version("14.0.1") pa_version_under15p0 = _palv < Version("15.0.0") + pa_version_under16p0 = _palv < Version("16.0.0") except ImportError: pa_version_under10p1 = True pa_version_under11p0 = True @@ -23,3 +24,4 @@ pa_version_under14p0 = True pa_version_under14p1 = True pa_version_under15p0 = True + pa_version_under16p0 = True diff --git a/pandas/tests/indexes/object/test_indexing.py b/pandas/tests/indexes/object/test_indexing.py index ebf9dac715f8d..443cacf94d239 100644 --- a/pandas/tests/indexes/object/test_indexing.py +++ b/pandas/tests/indexes/object/test_indexing.py @@ -7,6 +7,7 @@ NA, is_matching_na, ) +from pandas.compat import pa_version_under16p0 import pandas.util._test_decorators as td import pandas as pd @@ -200,7 +201,16 @@ class TestSliceLocs: (pd.IndexSlice["m":"m":-1], ""), # type: ignore[misc] ], ) - def test_slice_locs_negative_step(self, in_slice, expected, dtype): + def test_slice_locs_negative_step(self, in_slice, expected, dtype, request): + if ( + not pa_version_under16p0 + and dtype == "string[pyarrow_numpy]" + and in_slice == slice("a", "a", -1) + ): + request.applymarker( + pytest.mark.xfail(reason="https://github.com/apache/arrow/issues/40642") + ) + index = Index(list("bcdxy"), 
dtype=dtype) s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop, in_slice.step)
Backport PR #57892: CI: xfail Pyarrow slicing test
https://api.github.com/repos/pandas-dev/pandas/pulls/57898
2024-03-18T18:46:36Z
2024-03-18T20:36:47Z
2024-03-18T20:36:47Z
2024-03-18T20:36:47Z
TST: Reduce some test data sizes
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py index 8e73fbf152e79..4d2d83d25e8da 100644 --- a/pandas/tests/frame/methods/test_cov_corr.py +++ b/pandas/tests/frame/methods/test_cov_corr.py @@ -355,8 +355,8 @@ def test_corrwith_series(self, datetime_frame): tm.assert_series_equal(result, expected) def test_corrwith_matches_corrcoef(self): - df1 = DataFrame(np.arange(10000), columns=["a"]) - df2 = DataFrame(np.arange(10000) ** 2, columns=["a"]) + df1 = DataFrame(np.arange(100), columns=["a"]) + df2 = DataFrame(np.arange(100) ** 2, columns=["a"]) c1 = df1.corrwith(df2)["a"] c2 = np.corrcoef(df1["a"], df2["a"])[0][1] diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index e91ca64bb8970..46f6367fbb3ed 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -87,8 +87,8 @@ def demean(arr): def test_transform_fast(): df = DataFrame( { - "id": np.arange(100000) / 3, - "val": np.random.default_rng(2).standard_normal(100000), + "id": np.arange(10) / 3, + "val": np.random.default_rng(2).standard_normal(10), } ) diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index cfadf34823b0e..2a2772d1b3453 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -17,15 +17,6 @@ msg = "A value is trying to be set on a copy of a slice from a DataFrame" -def random_text(nobs=100): - # Construct a DataFrame where each row is a random slice from 'letters' - idxs = np.random.default_rng(2).integers(len(ascii_letters), size=(nobs, 2)) - idxs.sort(axis=1) - strings = [ascii_letters[x[0] : x[1]] for x in idxs] - - return DataFrame(strings, columns=["letters"]) - - class TestCaching: def test_slice_consolidate_invalidate_item_cache(self): # this is chained assignment, but will 'work' @@ 
-233,7 +224,11 @@ def test_detect_chained_assignment_is_copy_pickle(self, temp_file): @pytest.mark.arm_slow def test_detect_chained_assignment_str(self): - df = random_text(100000) + idxs = np.random.default_rng(2).integers(len(ascii_letters), size=(100, 2)) + idxs.sort(axis=1) + strings = [ascii_letters[x[0] : x[1]] for x in idxs] + + df = DataFrame(strings, columns=["letters"]) indexer = df.letters.apply(lambda x: len(x) > 10) df.loc[indexer, "letters"] = df.loc[indexer, "letters"].apply(str.lower) diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index ce7bb74240c53..8e05a8e6fc5d8 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -1036,11 +1036,7 @@ def test_decode_floating_point(self, sign, float_number): ) def test_encode_big_set(self): - s = set() - - for x in range(100000): - s.add(x) - + s = set(range(100000)) # Make sure no Exception is raised. ujson.ujson_dumps(s) diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 0ee5ee4ec137d..fecd24c9a4b40 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -1464,12 +1464,12 @@ def test_resample_nunique_with_date_gap(func, unit): tm.assert_series_equal(result, expected) -@pytest.mark.parametrize("n", [10000, 100000]) -@pytest.mark.parametrize("k", [10, 100, 1000]) -def test_resample_group_info(n, k, unit): +def test_resample_group_info(unit): # GH10914 # use a fixed seed to always have the same uniques + n = 100 + k = 10 prng = np.random.default_rng(2) dr = date_range(start="2015-08-27", periods=n // 10, freq="min").as_unit(unit)
null
https://api.github.com/repos/pandas-dev/pandas/pulls/57897
2024-03-18T18:41:17Z
2024-03-18T20:38:56Z
2024-03-18T20:38:55Z
2024-03-18T21:00:05Z
Testing a Tool
diff --git a/scripts/run_vulture.py b/scripts/run_vulture.py index edf0a1588305c..dfd4d28da7997 100644 --- a/scripts/run_vulture.py +++ b/scripts/run_vulture.py @@ -3,6 +3,7 @@ import argparse import sys +# vulture is unable to import some Dependency issue from vulture import Vulture if __name__ == "__main__": diff --git a/web/pandas_web.py b/web/pandas_web.py index aac07433f2712..2e1e020d6ce70 100755 --- a/web/pandas_web.py +++ b/web/pandas_web.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 """ -Simple static site generator for the pandas web. +Simple static site generator for the pandas web. pandas_web.py takes a directory as parameter, and copies all the files into the target directory after converting markdown files into html and rendering both
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57894
2024-03-18T18:34:02Z
2024-03-18T18:43:12Z
null
2024-03-18T18:43:12Z
TST: add pytest-localserver to dev deps
diff --git a/environment.yml b/environment.yml index 3528f12c66a8b..e7bf2556d27f8 100644 --- a/environment.yml +++ b/environment.yml @@ -17,6 +17,7 @@ dependencies: - pytest-cov - pytest-xdist>=2.2.0 - pytest-qt>=4.2.0 + - pytest-localserver - pyqt>=5.15.9 - coverage diff --git a/requirements-dev.txt b/requirements-dev.txt index 40c7403cb88e8..0cc064d2660bb 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -10,6 +10,7 @@ pytest>=7.3.2 pytest-cov pytest-xdist>=2.2.0 pytest-qt>=4.2.0 +pytest-localserver PyQt5>=5.15.9 coverage python-dateutil
Needed for the httpserver fixture that we use.
https://api.github.com/repos/pandas-dev/pandas/pulls/57893
2024-03-18T18:09:35Z
2024-03-18T18:54:44Z
2024-03-18T18:54:44Z
2024-03-18T18:56:05Z
CI: xfail Pyarrow slicing test
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 1c08df80df477..caa00b205a29c 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -31,6 +31,7 @@ pa_version_under13p0, pa_version_under14p0, pa_version_under14p1, + pa_version_under16p0, ) if TYPE_CHECKING: @@ -187,6 +188,7 @@ def get_bz2_file() -> type[pandas.compat.compressors.BZ2File]: "pa_version_under13p0", "pa_version_under14p0", "pa_version_under14p1", + "pa_version_under16p0", "IS64", "ISMUSL", "PY310", diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py index 5c9e885f8e9f5..5a96e5a4cc49a 100644 --- a/pandas/compat/pyarrow.py +++ b/pandas/compat/pyarrow.py @@ -15,6 +15,7 @@ pa_version_under14p0 = _palv < Version("14.0.0") pa_version_under14p1 = _palv < Version("14.0.1") pa_version_under15p0 = _palv < Version("15.0.0") + pa_version_under16p0 = _palv < Version("16.0.0") except ImportError: pa_version_under10p1 = True pa_version_under11p0 = True @@ -23,3 +24,4 @@ pa_version_under14p0 = True pa_version_under14p1 = True pa_version_under15p0 = True + pa_version_under16p0 = True diff --git a/pandas/tests/indexes/object/test_indexing.py b/pandas/tests/indexes/object/test_indexing.py index 039836da75cd5..34cc8eab4d812 100644 --- a/pandas/tests/indexes/object/test_indexing.py +++ b/pandas/tests/indexes/object/test_indexing.py @@ -7,6 +7,7 @@ NA, is_matching_na, ) +from pandas.compat import pa_version_under16p0 import pandas.util._test_decorators as td import pandas as pd @@ -201,7 +202,16 @@ class TestSliceLocs: (pd.IndexSlice["m":"m":-1], ""), # type: ignore[misc] ], ) - def test_slice_locs_negative_step(self, in_slice, expected, dtype): + def test_slice_locs_negative_step(self, in_slice, expected, dtype, request): + if ( + not pa_version_under16p0 + and dtype == "string[pyarrow_numpy]" + and in_slice == slice("a", "a", -1) + ): + request.applymarker( + pytest.mark.xfail(reason="https://github.com/apache/arrow/issues/40642") + ) + index = Index(list("bcdxy"), 
dtype=dtype) s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop, in_slice.step)
https://github.com/apache/arrow/issues/40642
https://api.github.com/repos/pandas-dev/pandas/pulls/57892
2024-03-18T17:36:30Z
2024-03-18T18:46:27Z
2024-03-18T18:46:27Z
2024-03-18T18:46:34Z
BUG: Handle Series construction with Dask, dict-like, Series
diff --git a/pandas/core/series.py b/pandas/core/series.py index 699ff413efb91..8a7c1531205e0 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -452,7 +452,7 @@ def __init__( data = data.reindex(index) copy = False data = data._mgr - elif is_dict_like(data): + elif isinstance(data, Mapping): data, index = self._init_dict(data, index, dtype) dtype = None copy = False @@ -519,7 +519,7 @@ def __init__( ) def _init_dict( - self, data, index: Index | None = None, dtype: DtypeObj | None = None + self, data: Mapping, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a
Should hopefully fix `test_construct_dask_float_array_int_dtype_match_ndarray` which is failing because dask `Series` now raises on `__bool__`?
https://api.github.com/repos/pandas-dev/pandas/pulls/57889
2024-03-18T17:03:38Z
2024-03-18T20:38:20Z
2024-03-18T20:38:20Z
2024-03-18T20:59:25Z
Backport PR #57883 on branch 2.2.x (Bump pypa/cibuildwheel from 2.16.5 to 2.17.0)
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f79b2c51b5f92..470c044d2e99e 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -141,7 +141,7 @@ jobs: - name: Build normal wheels if: ${{ (env.IS_SCHEDULE_DISPATCH != 'true' || env.IS_PUSH == 'true') }} - uses: pypa/cibuildwheel@v2.16.5 + uses: pypa/cibuildwheel@v2.17.0 with: package-dir: ./dist/${{ startsWith(matrix.buildplat[1], 'macosx') && env.sdist_name || needs.build_sdist.outputs.sdist_file }} env: @@ -150,7 +150,7 @@ jobs: - name: Build nightly wheels (with NumPy pre-release) if: ${{ (env.IS_SCHEDULE_DISPATCH == 'true' && env.IS_PUSH != 'true') }} - uses: pypa/cibuildwheel@v2.16.5 + uses: pypa/cibuildwheel@v2.17.0 with: package-dir: ./dist/${{ startsWith(matrix.buildplat[1], 'macosx') && env.sdist_name || needs.build_sdist.outputs.sdist_file }} env:
Backport PR #57883: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0
https://api.github.com/repos/pandas-dev/pandas/pulls/57888
2024-03-18T16:50:25Z
2024-03-18T17:43:02Z
2024-03-18T17:43:02Z
2024-03-18T17:43:02Z
CI: Remove ASAN job
diff --git a/.github/actions/run-tests/action.yml b/.github/actions/run-tests/action.yml index 4a9fe04a8f5f9..66e4142dc0cbb 100644 --- a/.github/actions/run-tests/action.yml +++ b/.github/actions/run-tests/action.yml @@ -1,16 +1,9 @@ name: Run tests and report results -inputs: - preload: - description: Preload arguments for sanitizer - required: false - asan_options: - description: Arguments for Address Sanitizer (ASAN) - required: false runs: using: composite steps: - name: Test - run: ${{ inputs.asan_options }} ${{ inputs.preload }} ci/run_tests.sh + run: ci/run_tests.sh shell: bash -el {0} - name: Publish test results diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 855973a22886a..f93950224eaae 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -68,14 +68,6 @@ jobs: - name: "Pyarrow Nightly" env_file: actions-311-pyarrownightly.yaml pattern: "not slow and not network and not single_cpu" - - name: "ASAN / UBSAN" - env_file: actions-311-sanitizers.yaml - pattern: "not slow and not network and not single_cpu and not skip_ubsan" - asan_options: "ASAN_OPTIONS=detect_leaks=0" - preload: LD_PRELOAD=$(gcc -print-file-name=libasan.so) - meson_args: --config-settings=setup-args="-Db_sanitize=address,undefined" - cflags_adds: -fno-sanitize-recover=all - pytest_workers: -1 # disable pytest-xdist as it swallows stderr from ASAN fail-fast: false name: ${{ matrix.name || format('ubuntu-latest {0}', matrix.env_file) }} env: @@ -161,18 +153,12 @@ jobs: - name: Test (not single_cpu) uses: ./.github/actions/run-tests if: ${{ matrix.name != 'Pypy' }} - with: - preload: ${{ matrix.preload }} - asan_options: ${{ matrix.asan_options }} env: # Set pattern to not single_cpu if not already set PATTERN: ${{ env.PATTERN == '' && 'not single_cpu' || matrix.pattern }} - name: Test (single_cpu) uses: ./.github/actions/run-tests - with: - preload: ${{ matrix.preload }} - asan_options: ${{ matrix.asan_options }} env: 
PATTERN: 'single_cpu' PYTEST_WORKERS: 0 diff --git a/ci/deps/actions-311-sanitizers.yaml b/ci/deps/actions-311-sanitizers.yaml deleted file mode 100644 index f5f04c90bffad..0000000000000 --- a/ci/deps/actions-311-sanitizers.yaml +++ /dev/null @@ -1,32 +0,0 @@ -name: pandas-dev -channels: - - conda-forge -dependencies: - - python=3.11 - - # build dependencies - - versioneer[toml] - - cython>=0.29.33 - - meson[ninja]=1.2.1 - - meson-python=0.13.1 - - # test dependencies - - pytest>=7.3.2 - - pytest-cov - - pytest-xdist>=2.2.0 - - pytest-localserver>=0.7.1 - - pytest-qt>=4.2.0 - - boto3 - - hypothesis>=6.46.1 - - pyqt>=5.15.9 - - # required dependencies - - python-dateutil - - numpy - - pytz - - # pandas dependencies - - pip - - - pip: - - "tzdata>=2022.7"
null
https://api.github.com/repos/pandas-dev/pandas/pulls/57886
2024-03-18T14:37:27Z
2024-03-19T05:31:59Z
2024-03-19T05:31:59Z
2024-03-19T05:32:09Z
Bump pypa/cibuildwheel from 2.16.5 to 2.17.0
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f79b2c51b5f92..470c044d2e99e 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -141,7 +141,7 @@ jobs: - name: Build normal wheels if: ${{ (env.IS_SCHEDULE_DISPATCH != 'true' || env.IS_PUSH == 'true') }} - uses: pypa/cibuildwheel@v2.16.5 + uses: pypa/cibuildwheel@v2.17.0 with: package-dir: ./dist/${{ startsWith(matrix.buildplat[1], 'macosx') && env.sdist_name || needs.build_sdist.outputs.sdist_file }} env: @@ -150,7 +150,7 @@ jobs: - name: Build nightly wheels (with NumPy pre-release) if: ${{ (env.IS_SCHEDULE_DISPATCH == 'true' && env.IS_PUSH != 'true') }} - uses: pypa/cibuildwheel@v2.16.5 + uses: pypa/cibuildwheel@v2.17.0 with: package-dir: ./dist/${{ startsWith(matrix.buildplat[1], 'macosx') && env.sdist_name || needs.build_sdist.outputs.sdist_file }} env:
Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.16.5 to 2.17.0. <details> <summary>Release notes</summary> <p><em>Sourced from <a href="https://github.com/pypa/cibuildwheel/releases">pypa/cibuildwheel's releases</a>.</em></p> <blockquote> <h2>v2.17.0</h2> <ul> <li>🌟 Adds the ability to inherit configuration in TOML overrides. This makes certain configurations much simpler. If you're overriding an option like <code>before-build</code> or <code>environment</code>, and you just want to add an extra command or environment variable, you can just append (or prepend) to the previous config. See <a href="https://cibuildwheel.pypa.io/en/stable/options/#inherit">the docs</a> for more information. (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1730">#1730</a>)</li> <li>🌟 Adds official support for native arm64 macOS GitHub runners. To use them, just specify <code>macos-14</code> as an <code>os</code> of your job in your workflow file. You can also keep <code>macos-13</code> in your build matrix to build x86_64. Check out the new <a href="https://cibuildwheel.pypa.io/en/stable/setup/#github-actions">GitHub Actions example config</a>.</li> <li>✨ You no longer need to specify <code>--platform</code> to run cibuildwheel locally! Instead it will detect your platform automatically. This was a safety feature, no longer necessary. (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1727">#1727</a>)</li> <li>🛠 Removed setuptools and wheel pinned versions. This only affects old-style projects without a <code>pyproject.toml</code>, projects with <code>pyproject.toml</code> are already getting fresh versions of their <code>build-system.requires</code> installed into an isolated environment. 
(<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1725">#1725</a>)</li> <li>🛠 Improve how the GitHub Action passes arguments (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1757">#1757</a>)</li> <li>🛠 Remove a system-wide install of pipx in the GitHub Action (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1745">#1745</a>)</li> <li>🐛 No longer will cibuildwheel override the PIP_CONSTRAINT environment variable when using the <code>build</code> frontend. Instead it will be extended. (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1675">#1675</a>)</li> <li>🐛 Fix a bug where building and testing both x86_86 and arm64 wheels on the same runner caused the wrong architectures in the test environment (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1750">#1750</a>)</li> <li>🐛 Fix a bug that prevented testing a CPython 3.8 wheel targeting macOS 11+ on x86_64 (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1768">#1768</a>)</li> <li>📚 Moved the docs onto the official PyPA domain - they're now available at <a href="https://cibuildwheel.pypa.io">https://cibuildwheel.pypa.io</a> . (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1775">#1775</a>)</li> <li>📚 Docs and examples improvements (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1762">#1762</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1734">#1734</a>)</li> </ul> </blockquote> </details> <details> <summary>Changelog</summary> <p><em>Sourced from <a href="https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md">pypa/cibuildwheel's changelog</a>.</em></p> <blockquote> <h3>v2.17.0</h3> <p><em>11 March 2024</em></p> <ul> <li>🌟 Adds the ability to inherit configuration in TOML overrides. This makes certain configurations much simpler. 
If you're overriding an option like <code>before-build</code> or <code>environment</code>, and you just want to add an extra command or environment variable, you can just append (or prepend) to the previous config. See <a href="https://cibuildwheel.pypa.io/en/stable/options/#inherit">the docs</a> for more information. (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1730">#1730</a>)</li> <li>🌟 Adds official support for native arm64 macOS GitHub runners. To use them, just specify <code>macos-14</code> as an <code>os</code> of your job in your workflow file. You can also keep <code>macos-13</code> in your build matrix to build x86_64. Check out the new <a href="https://cibuildwheel.pypa.io/en/stable/setup/#github-actions">GitHub Actions example config</a>.</li> <li>✨ You no longer need to specify <code>--platform</code> to run cibuildwheel locally! Instead it will detect your platform automatically. This was a safety feature, no longer necessary. (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1727">#1727</a>)</li> <li>🛠 Removed setuptools and wheel pinned versions. This only affects old-style projects without a <code>pyproject.toml</code>, projects with <code>pyproject.toml</code> are already getting fresh versions of their <code>build-system.requires</code> installed into an isolated environment. (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1725">#1725</a>)</li> <li>🛠 Improve how the GitHub Action passes arguments (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1757">#1757</a>)</li> <li>🛠 Remove a system-wide install of pipx in the GitHub Action (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1745">#1745</a>)</li> <li>🐛 No longer will cibuildwheel override the PIP_CONSTRAINT environment variable when using the <code>build</code> frontend. Instead it will be extended. 
(<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1675">#1675</a>)</li> <li>🐛 Fix a bug where building and testing both x86_86 and arm64 wheels on the same runner caused the wrong architectures in the test environment (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1750">#1750</a>)</li> <li>🐛 Fix a bug that prevented testing a CPython 3.8 wheel targeting macOS 11+ on x86_64 (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1768">#1768</a>)</li> <li>📚 Moved the docs onto the official PyPA domain - they're now available at <a href="https://cibuildwheel.pypa.io">https://cibuildwheel.pypa.io</a> . (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1775">#1775</a>)</li> <li>📚 Docs and examples improvements (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1762">#1762</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1734">#1734</a>)</li> </ul> </blockquote> </details> <details> <summary>Commits</summary> <ul> <li><a href="https://github.com/pypa/cibuildwheel/commit/8d945475ac4b1aac4ae08b2fd27db9917158b6ce"><code>8d94547</code></a> Bump version: v2.17.0</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/ca06deb26f92b2b2c6019a3bc223875215fe4cf2"><code>ca06deb</code></a> Merge pull request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1775">#1775</a> from pypa/doc-domain</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/f7e19222253830775777d4dc7e8cf56aa098d97f"><code>f7e1922</code></a> CirrusCI fixes (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1786">#1786</a>)</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/0d8e919dfc5b7631e641377671db317556dcc7ef"><code>0d8e919</code></a> [Bot] Update dependencies (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1784">#1784</a>)</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/022de07dc13bb25455653a082449a0c038632ac0"><code>022de07</code></a> Merge pull 
request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1785">#1785</a> from pypa/revert-1783</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/920f574191fe30782d55398b7a0e70d62c999024"><code>920f574</code></a> Remove manylinux1 docker pin</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/5c06f3c28934b3830d065b17ab853c4465ce6623"><code>5c06f3c</code></a> docs: Add how to run tests in development (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1698">#1698</a>)</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/e2a0839555d4d2ffd366ac4cd933262f5974fd10"><code>e2a0839</code></a> fix: set SYSTEM_VERSION_COMPAT=0 during pip install on macos (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1768">#1768</a>)</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/87fff7728267ddada9c54df079e5864e5c5e5dfb"><code>87fff77</code></a> chore(deps): bump the actions group with 1 update (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1776">#1776</a>)</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/8ef9486aab2cc0aea71870a765265e294d84a679"><code>8ef9486</code></a> Add <code>pedalboard</code> to projects.yml. (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1781">#1781</a>)</li> <li>Additional commits viewable in <a href="https://github.com/pypa/cibuildwheel/compare/v2.16.5...v2.17.0">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pypa/cibuildwheel&package-manager=github_actions&previous-version=2.16.5&new-version=2.17.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. 
[//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) </details>
https://api.github.com/repos/pandas-dev/pandas/pulls/57883
2024-03-18T08:15:40Z
2024-03-18T16:49:53Z
2024-03-18T16:49:53Z
2024-03-18T16:49:57Z
DOC: Fix a bug in the docstring of stack method
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d00c659392ef3..8fd0cd8c66e3c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9305,7 +9305,7 @@ def stack( section. sort : bool, default True Whether to sort the levels of the resulting MultiIndex. - future_stack : bool, default False + future_stack : bool, default True Whether to use the new implementation that will replace the current implementation in pandas 3.0. When True, dropna and sort have no impact on the result and must remain unspecified. See :ref:`pandas 2.1.0 Release
null
https://api.github.com/repos/pandas-dev/pandas/pulls/57881
2024-03-18T07:07:30Z
2024-03-18T16:29:40Z
2024-03-18T16:29:40Z
2024-03-18T16:29:46Z
EHN: add ability to format index and col names to Styler
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index a9967dcb8efe6..63e5d20160dd2 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -797,8 +797,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.io.formats.style.Styler.clear SA01" \ -i "pandas.io.formats.style.Styler.concat RT03,SA01" \ -i "pandas.io.formats.style.Styler.export RT03" \ - -i "pandas.io.formats.style.Styler.format RT03" \ - -i "pandas.io.formats.style.Styler.format_index RT03" \ -i "pandas.io.formats.style.Styler.from_custom_template SA01" \ -i "pandas.io.formats.style.Styler.hide RT03,SA01" \ -i "pandas.io.formats.style.Styler.highlight_between RT03" \ @@ -808,7 +806,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then -i "pandas.io.formats.style.Styler.highlight_quantile RT03" \ -i "pandas.io.formats.style.Styler.map RT03" \ -i "pandas.io.formats.style.Styler.map_index RT03" \ - -i "pandas.io.formats.style.Styler.relabel_index RT03" \ -i "pandas.io.formats.style.Styler.set_caption RT03,SA01" \ -i "pandas.io.formats.style.Styler.set_properties RT03,SA01" \ -i "pandas.io.formats.style.Styler.set_sticky RT03,SA01" \ diff --git a/doc/source/reference/style.rst b/doc/source/reference/style.rst index 2256876c93e01..0e1d93841d52f 100644 --- a/doc/source/reference/style.rst +++ b/doc/source/reference/style.rst @@ -41,6 +41,7 @@ Style application Styler.map_index Styler.format Styler.format_index + Styler.format_index_names Styler.relabel_index Styler.hide Styler.concat diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index a398b93b60018..878eb4e79a1d5 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -34,6 +34,8 @@ Other enhancements - Allow dictionaries to be passed to :meth:`pandas.Series.str.replace` via ``pat`` parameter (:issue:`51748`) - Support passing a :class:`Series` input to :func:`json_normalize` that retains the :class:`Series` :class:`Index` (:issue:`51452`) - Users can globally disable any 
``PerformanceWarning`` by setting the option ``mode.performance_warnings`` to ``False`` (:issue:`56920`) +- :meth:`Styler.format_index_names` can now be used to format the index and column names (:issue:`48936` and :issue:`47489`) +- .. --------------------------------------------------------------------------- .. _whatsnew_300.notable_bug_fixes: diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 7247e11be874e..ab5f1c039b7ca 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1683,6 +1683,8 @@ def _copy(self, deepcopy: bool = False) -> Styler: "_display_funcs", "_display_funcs_index", "_display_funcs_columns", + "_display_funcs_index_names", + "_display_funcs_column_names", "hidden_rows", "hidden_columns", "ctx", diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index 2c93dbe74eace..92afbc0e150ef 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -140,9 +140,15 @@ def __init__( self._display_funcs_index: DefaultDict[ # maps (row, level) -> format func tuple[int, int], Callable[[Any], str] ] = defaultdict(lambda: partial(_default_formatter, precision=precision)) + self._display_funcs_index_names: DefaultDict[ # maps index level -> format func + int, Callable[[Any], str] + ] = defaultdict(lambda: partial(_default_formatter, precision=precision)) self._display_funcs_columns: DefaultDict[ # maps (level, col) -> format func tuple[int, int], Callable[[Any], str] ] = defaultdict(lambda: partial(_default_formatter, precision=precision)) + self._display_funcs_column_names: DefaultDict[ # maps col level -> format func + int, Callable[[Any], str] + ] = defaultdict(lambda: partial(_default_formatter, precision=precision)) def _render( self, @@ -460,6 +466,12 @@ def _generate_col_header_row( ] * (self.index.nlevels - sum(self.hide_index_) - 1) name = self.data.columns.names[r] + + is_display = name is not None and not self.hide_column_names + value = name if 
is_display else self.css["blank_value"] + display_value = ( + self._display_funcs_column_names[r](value) if is_display else None + ) column_name = [ _element( "th", @@ -468,10 +480,9 @@ def _generate_col_header_row( if name is None else f"{self.css['index_name']} {self.css['level']}{r}" ), - name - if (name is not None and not self.hide_column_names) - else self.css["blank_value"], + value, not all(self.hide_index_), + display_value=display_value, ) ] @@ -553,6 +564,9 @@ def _generate_index_names_row( f"{self.css['index_name']} {self.css['level']}{c}", self.css["blank_value"] if name is None else name, not self.hide_index_[c], + display_value=( + None if name is None else self._display_funcs_index_names[c](name) + ), ) for c, name in enumerate(self.data.index.names) ] @@ -1005,6 +1019,7 @@ def format( Returns ------- Styler + Returns itself for chaining. See Also -------- @@ -1261,6 +1276,7 @@ def format_index( Returns ------- Styler + Returns itself for chaining. See Also -------- @@ -1425,6 +1441,7 @@ def relabel_index( Returns ------- Styler + Returns itself for chaining. See Also -------- @@ -1560,6 +1577,140 @@ def alias_(x, value): return self + def format_index_names( + self, + formatter: ExtFormatter | None = None, + axis: Axis = 0, + level: Level | list[Level] | None = None, + na_rep: str | None = None, + precision: int | None = None, + decimal: str = ".", + thousands: str | None = None, + escape: str | None = None, + hyperlinks: str | None = None, + ) -> StylerRenderer: + r""" + Format the text display value of index names or column names. + + .. versionadded:: 3.0 + + Parameters + ---------- + formatter : str, callable, dict or None + Object to define how values are displayed. See notes. + axis : {0, "index", 1, "columns"} + Whether to apply the formatter to the index or column headers. + level : int, str, list + The level(s) over which to apply the generic formatter. + na_rep : str, optional + Representation for missing values. 
+ If ``na_rep`` is None, no special formatting is applied. + precision : int, optional + Floating point precision to use for display purposes, if not determined by + the specified ``formatter``. + decimal : str, default "." + Character used as decimal separator for floats, complex and integers. + thousands : str, optional, default None + Character used as thousands separator for floats, complex and integers. + escape : str, optional + Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` + in cell display string with HTML-safe sequences. + Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``, + ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with + LaTeX-safe sequences. + Escaping is done before ``formatter``. + hyperlinks : {"html", "latex"}, optional + Convert string patterns containing https://, http://, ftp:// or www. to + HTML <a> tags as clickable URL hyperlinks if "html", or LaTeX \href + commands if "latex". + + Returns + ------- + Styler + Returns itself for chaining. + + Raises + ------ + ValueError + If the `formatter` is a string and the dtypes are incompatible. + + See Also + -------- + Styler.format_index: Format the text display value of index labels + or column headers. + + Notes + ----- + This method has a similar signature to :meth:`Styler.format_index`. Since + `names` are generally label based, and often not numeric, the typical features + expected to be more frequently used here are ``escape`` and ``hyperlinks``. + + .. warning:: + `Styler.format_index_names` is ignored when using the output format + `Styler.to_excel`, since Excel and Python have inherrently different + formatting structures. + + Examples + -------- + >>> df = pd.DataFrame( + ... [[1, 2], [3, 4]], + ... index=pd.Index(["a", "b"], name="idx"), + ... 
) + >>> df # doctest: +SKIP + 0 1 + idx + a 1 2 + b 3 4 + >>> df.style.format_index_names(lambda x: x.upper(), axis=0) # doctest: +SKIP + 0 1 + IDX + a 1 2 + b 3 4 + """ + axis = self.data._get_axis_number(axis) + if axis == 0: + display_funcs_, obj = self._display_funcs_index_names, self.index + else: + display_funcs_, obj = self._display_funcs_column_names, self.columns + levels_ = refactor_levels(level, obj) + + if all( + ( + formatter is None, + level is None, + precision is None, + decimal == ".", + thousands is None, + na_rep is None, + escape is None, + hyperlinks is None, + ) + ): + display_funcs_.clear() + return self # clear the formatter / revert to default and avoid looping + + if not isinstance(formatter, dict): + formatter = {level: formatter for level in levels_} + else: + formatter = { + obj._get_level_number(level): formatter_ + for level, formatter_ in formatter.items() + } + + for lvl in levels_: + format_func = _maybe_wrap_formatter( + formatter.get(lvl), + na_rep=na_rep, + precision=precision, + decimal=decimal, + thousands=thousands, + escape=escape, + hyperlinks=hyperlinks, + ) + display_funcs_[lvl] = format_func + + return self + def _element( html_element: str, @@ -1571,7 +1722,7 @@ def _element( """ Template to return container with information for a <td></td> or <th></th> element. 
""" - if "display_value" not in kwargs: + if "display_value" not in kwargs or kwargs["display_value"] is None: kwargs["display_value"] = value return { "type": html_element, diff --git a/pandas/tests/io/formats/style/test_format.py b/pandas/tests/io/formats/style/test_format.py index 1c84816ead140..ae68fcf9ef1fc 100644 --- a/pandas/tests/io/formats/style/test_format.py +++ b/pandas/tests/io/formats/style/test_format.py @@ -32,10 +32,14 @@ def styler(df): @pytest.fixture def df_multi(): - return DataFrame( - data=np.arange(16).reshape(4, 4), - columns=MultiIndex.from_product([["A", "B"], ["a", "b"]]), - index=MultiIndex.from_product([["X", "Y"], ["x", "y"]]), + return ( + DataFrame( + data=np.arange(16).reshape(4, 4), + columns=MultiIndex.from_product([["A", "B"], ["a", "b"]]), + index=MultiIndex.from_product([["X", "Y"], ["x", "y"]]), + ) + .rename_axis(["0_0", "0_1"], axis=0) + .rename_axis(["1_0", "1_1"], axis=1) ) @@ -560,3 +564,98 @@ def test_relabel_roundtrip(styler): ctx = styler._translate(True, True) assert {"value": "x", "display_value": "x"}.items() <= ctx["body"][0][0].items() assert {"value": "y", "display_value": "y"}.items() <= ctx["body"][1][0].items() + + +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize( + "level, expected", + [ + (0, ["X", "one"]), # level int + ("zero", ["X", "one"]), # level name + (1, ["zero", "X"]), # other level int + ("one", ["zero", "X"]), # other level name + ([0, 1], ["X", "X"]), # both levels + ([0, "zero"], ["X", "one"]), # level int and name simultaneous + ([0, "one"], ["X", "X"]), # both levels as int and name + (["one", "zero"], ["X", "X"]), # both level names, reversed + ], +) +def test_format_index_names_level(axis, level, expected): + midx = MultiIndex.from_arrays([["_", "_"], ["_", "_"]], names=["zero", "one"]) + df = DataFrame([[1, 2], [3, 4]]) + if axis == 0: + df.index = midx + else: + df.columns = midx + + styler = df.style.format_index_names(lambda v: "X", level=level, axis=axis) + ctx = 
styler._translate(True, True) + + if axis == 0: # compare index + result = [ctx["head"][1][s]["display_value"] for s in range(2)] + else: # compare columns + result = [ctx["head"][s][0]["display_value"] for s in range(2)] + assert expected == result + + +@pytest.mark.parametrize( + "attr, kwargs", + [ + ("_display_funcs_index_names", {"axis": 0}), + ("_display_funcs_column_names", {"axis": 1}), + ], +) +def test_format_index_names_clear(styler, attr, kwargs): + assert 0 not in getattr(styler, attr) # using default + styler.format_index_names("{:.2f}", **kwargs) + assert 0 in getattr(styler, attr) # formatter is specified + styler.format_index_names(**kwargs) + assert 0 not in getattr(styler, attr) # formatter cleared to default + + +@pytest.mark.parametrize("axis", [0, 1]) +def test_format_index_names_callable(styler_multi, axis): + ctx = styler_multi.format_index_names( + lambda v: v.replace("_", "A"), axis=axis + )._translate(True, True) + result = [ + ctx["head"][2][0]["display_value"], + ctx["head"][2][1]["display_value"], + ctx["head"][0][1]["display_value"], + ctx["head"][1][1]["display_value"], + ] + if axis == 0: + expected = ["0A0", "0A1", "1_0", "1_1"] + else: + expected = ["0_0", "0_1", "1A0", "1A1"] + assert result == expected + + +def test_format_index_names_dict(styler_multi): + ctx = ( + styler_multi.format_index_names({"0_0": "{:<<5}"}) + .format_index_names({"1_1": "{:>>4}"}, axis=1) + ._translate(True, True) + ) + assert ctx["head"][2][0]["display_value"] == "0_0<<" + assert ctx["head"][1][1]["display_value"] == ">1_1" + + +def test_format_index_names_with_hidden_levels(styler_multi): + ctx = styler_multi._translate(True, True) + full_head_height = len(ctx["head"]) + full_head_width = len(ctx["head"][0]) + assert full_head_height == 3 + assert full_head_width == 6 + + ctx = ( + styler_multi.hide(axis=0, level=1) + .hide(axis=1, level=1) + .format_index_names("{:>>4}", axis=1) + .format_index_names("{:!<5}") + ._translate(True, True) + ) + assert 
len(ctx["head"]) == full_head_height - 1 + assert len(ctx["head"][0]) == full_head_width - 1 + assert ctx["head"][0][0]["display_value"] == ">1_0" + assert ctx["head"][1][0]["display_value"] == "0_0!!" diff --git a/pandas/tests/io/formats/style/test_html.py b/pandas/tests/io/formats/style/test_html.py index 8cb06e3b7619d..2306324efb974 100644 --- a/pandas/tests/io/formats/style/test_html.py +++ b/pandas/tests/io/formats/style/test_html.py @@ -34,6 +34,16 @@ def styler_mi(): return Styler(DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=midx)) +@pytest.fixture +def styler_multi(): + df = DataFrame( + data=np.arange(16).reshape(4, 4), + columns=MultiIndex.from_product([["A", "B"], ["a", "b"]], names=["A&", "b&"]), + index=MultiIndex.from_product([["X", "Y"], ["x", "y"]], names=["X>", "y_"]), + ) + return Styler(df) + + @pytest.fixture def tpl_style(env): return env.get_template("html_style.tpl") @@ -1003,3 +1013,23 @@ def test_to_html_na_rep_non_scalar_data(datapath): </table> """ assert result == expected + + +@pytest.mark.parametrize("escape_axis_0", [True, False]) +@pytest.mark.parametrize("escape_axis_1", [True, False]) +def test_format_index_names(styler_multi, escape_axis_0, escape_axis_1): + if escape_axis_0: + styler_multi.format_index_names(axis=0, escape="html") + expected_index = ["X&gt;", "y_"] + else: + expected_index = ["X>", "y_"] + + if escape_axis_1: + styler_multi.format_index_names(axis=1, escape="html") + expected_columns = ["A&amp;", "b&amp;"] + else: + expected_columns = ["A&", "b&"] + + result = styler_multi.to_html(table_uuid="test") + for expected_str in expected_index + expected_columns: + assert f"{expected_str}</th>" in result diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py index 6fa72bd48031c..89addbbbc1ded 100644 --- a/pandas/tests/io/formats/style/test_style.py +++ b/pandas/tests/io/formats/style/test_style.py @@ -77,6 +77,8 @@ def mi_styler_comp(mi_styler): 
columns=mi_styler.columns, ) ) + mi_styler.format_index_names(escape="html", axis=0) + mi_styler.format_index_names(escape="html", axis=1) return mi_styler
- [ ] closes #48936 and closes #47489 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. xref #57362 Stage one. Part of an multi-stages effort: https://github.com/pandas-dev/pandas/pull/57880#issuecomment-2003636401 Adding a method to Styler and ensuring it works for the default HTML cases with tests in the appropriate pages.
https://api.github.com/repos/pandas-dev/pandas/pulls/57880
2024-03-18T05:58:17Z
2024-03-26T17:26:08Z
2024-03-26T17:26:08Z
2024-04-12T00:11:48Z
CI: Better error control in the validation of docstrings
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 4b8e632f3246c..3c46cb39eeb7e 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -65,1236 +65,1217 @@ fi ### DOCSTRINGS ### if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then - PARAMETERS=(\ - --format=actions\ - --errors=EX01,EX03,EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL08,GL09,GL10,PD01,PR01,PR02,PR03,PR04,PR05,PR06,PR07,PR08,PR09,PR10,RT01,RT02,RT03,RT04,RT05,SA01,SA02,SA03,SA04,SA05,SS01,SS02,SS03,SS04,SS05,SS06\ - --ignore_errors pandas.Categorical.__array__ SA01\ - --ignore_errors pandas.Categorical.codes SA01\ - --ignore_errors pandas.Categorical.dtype SA01\ - --ignore_errors pandas.Categorical.from_codes SA01\ - --ignore_errors pandas.Categorical.ordered SA01\ - --ignore_errors pandas.CategoricalDtype.categories SA01\ - --ignore_errors pandas.CategoricalDtype.ordered SA01\ - --ignore_errors pandas.CategoricalIndex.codes SA01\ - --ignore_errors pandas.CategoricalIndex.ordered SA01\ - --ignore_errors pandas.DataFrame.__dataframe__ SA01\ - --ignore_errors pandas.DataFrame.__iter__ SA01\ - --ignore_errors pandas.DataFrame.assign SA01\ - --ignore_errors pandas.DataFrame.at_time PR01\ - --ignore_errors pandas.DataFrame.axes SA01\ - --ignore_errors pandas.DataFrame.backfill PR01,SA01\ - --ignore_errors pandas.DataFrame.bfill SA01\ - --ignore_errors pandas.DataFrame.columns SA01\ - --ignore_errors pandas.DataFrame.copy SA01\ - --ignore_errors pandas.DataFrame.droplevel SA01\ - --ignore_errors pandas.DataFrame.dtypes SA01\ - --ignore_errors pandas.DataFrame.ffill SA01\ - --ignore_errors pandas.DataFrame.first_valid_index SA01\ - --ignore_errors pandas.DataFrame.get PR01,SA01\ - --ignore_errors pandas.DataFrame.hist RT03\ - --ignore_errors pandas.DataFrame.infer_objects RT03\ - --ignore_errors pandas.DataFrame.keys SA01\ - --ignore_errors pandas.DataFrame.kurt RT03,SA01\ - --ignore_errors pandas.DataFrame.kurtosis RT03,SA01\ - --ignore_errors pandas.DataFrame.last_valid_index SA01\ - --ignore_errors 
pandas.DataFrame.mask RT03\ - --ignore_errors pandas.DataFrame.max RT03\ - --ignore_errors pandas.DataFrame.mean RT03,SA01\ - --ignore_errors pandas.DataFrame.median RT03,SA01\ - --ignore_errors pandas.DataFrame.min RT03\ - --ignore_errors pandas.DataFrame.pad PR01,SA01\ - --ignore_errors pandas.DataFrame.plot PR02,SA01\ - --ignore_errors pandas.DataFrame.pop SA01\ - --ignore_errors pandas.DataFrame.prod RT03\ - --ignore_errors pandas.DataFrame.product RT03\ - --ignore_errors pandas.DataFrame.reorder_levels SA01\ - --ignore_errors pandas.DataFrame.sem PR01,RT03,SA01\ - --ignore_errors pandas.DataFrame.skew RT03,SA01\ - --ignore_errors pandas.DataFrame.sparse PR01,SA01\ - --ignore_errors pandas.DataFrame.sparse.density SA01\ - --ignore_errors pandas.DataFrame.sparse.from_spmatrix SA01\ - --ignore_errors pandas.DataFrame.sparse.to_coo SA01\ - --ignore_errors pandas.DataFrame.sparse.to_dense SA01\ - --ignore_errors pandas.DataFrame.std PR01,RT03,SA01\ - --ignore_errors pandas.DataFrame.sum RT03\ - --ignore_errors pandas.DataFrame.swapaxes PR01,SA01\ - --ignore_errors pandas.DataFrame.swaplevel SA01\ - --ignore_errors pandas.DataFrame.to_feather SA01\ - --ignore_errors pandas.DataFrame.to_markdown SA01\ - --ignore_errors pandas.DataFrame.to_parquet RT03\ - --ignore_errors pandas.DataFrame.to_period SA01\ - --ignore_errors pandas.DataFrame.to_timestamp SA01\ - --ignore_errors pandas.DataFrame.tz_convert SA01\ - --ignore_errors pandas.DataFrame.tz_localize SA01\ - --ignore_errors pandas.DataFrame.unstack RT03\ - --ignore_errors pandas.DataFrame.value_counts RT03\ - --ignore_errors pandas.DataFrame.var PR01,RT03,SA01\ - --ignore_errors pandas.DataFrame.where RT03\ - --ignore_errors pandas.DatetimeIndex.ceil SA01\ - --ignore_errors pandas.DatetimeIndex.date SA01\ - --ignore_errors pandas.DatetimeIndex.day SA01\ - --ignore_errors pandas.DatetimeIndex.day_name SA01\ - --ignore_errors pandas.DatetimeIndex.day_of_year SA01\ - --ignore_errors pandas.DatetimeIndex.dayofyear 
SA01\ - --ignore_errors pandas.DatetimeIndex.floor SA01\ - --ignore_errors pandas.DatetimeIndex.freqstr SA01\ - --ignore_errors pandas.DatetimeIndex.hour SA01\ - --ignore_errors pandas.DatetimeIndex.indexer_at_time PR01,RT03\ - --ignore_errors pandas.DatetimeIndex.indexer_between_time RT03\ - --ignore_errors pandas.DatetimeIndex.inferred_freq SA01\ - --ignore_errors pandas.DatetimeIndex.is_leap_year SA01\ - --ignore_errors pandas.DatetimeIndex.microsecond SA01\ - --ignore_errors pandas.DatetimeIndex.minute SA01\ - --ignore_errors pandas.DatetimeIndex.month SA01\ - --ignore_errors pandas.DatetimeIndex.month_name SA01\ - --ignore_errors pandas.DatetimeIndex.nanosecond SA01\ - --ignore_errors pandas.DatetimeIndex.quarter SA01\ - --ignore_errors pandas.DatetimeIndex.round SA01\ - --ignore_errors pandas.DatetimeIndex.second SA01\ - --ignore_errors pandas.DatetimeIndex.snap PR01,RT03,SA01\ - --ignore_errors pandas.DatetimeIndex.std PR01,RT03\ - --ignore_errors pandas.DatetimeIndex.time SA01\ - --ignore_errors pandas.DatetimeIndex.timetz SA01\ - --ignore_errors pandas.DatetimeIndex.to_period RT03\ - --ignore_errors pandas.DatetimeIndex.to_pydatetime RT03,SA01\ - --ignore_errors pandas.DatetimeIndex.tz SA01\ - --ignore_errors pandas.DatetimeIndex.tz_convert RT03\ - --ignore_errors pandas.DatetimeIndex.year SA01\ - --ignore_errors pandas.DatetimeTZDtype SA01\ - --ignore_errors pandas.DatetimeTZDtype.tz SA01\ - --ignore_errors pandas.DatetimeTZDtype.unit SA01\ - --ignore_errors pandas.ExcelFile PR01,SA01\ - --ignore_errors pandas.ExcelFile.parse PR01,SA01\ - --ignore_errors pandas.ExcelWriter SA01\ - --ignore_errors pandas.Flags SA01\ - --ignore_errors pandas.Float32Dtype SA01\ - --ignore_errors pandas.Float64Dtype SA01\ - --ignore_errors pandas.Grouper PR02,SA01\ - --ignore_errors pandas.HDFStore.append PR01,SA01\ - --ignore_errors pandas.HDFStore.get SA01\ - --ignore_errors pandas.HDFStore.groups SA01\ - --ignore_errors pandas.HDFStore.info RT03,SA01\ - --ignore_errors 
pandas.HDFStore.keys SA01\ - --ignore_errors pandas.HDFStore.put PR01,SA01\ - --ignore_errors pandas.HDFStore.select SA01\ - --ignore_errors pandas.HDFStore.walk SA01\ - --ignore_errors pandas.Index PR07\ - --ignore_errors pandas.Index.T SA01\ - --ignore_errors pandas.Index.append PR07,RT03,SA01\ - --ignore_errors pandas.Index.astype SA01\ - --ignore_errors pandas.Index.copy PR07,SA01\ - --ignore_errors pandas.Index.difference PR07,RT03,SA01\ - --ignore_errors pandas.Index.drop PR07,SA01\ - --ignore_errors pandas.Index.drop_duplicates RT03\ - --ignore_errors pandas.Index.droplevel RT03,SA01\ - --ignore_errors pandas.Index.dropna RT03,SA01\ - --ignore_errors pandas.Index.dtype SA01\ - --ignore_errors pandas.Index.duplicated RT03\ - --ignore_errors pandas.Index.empty GL08\ - --ignore_errors pandas.Index.equals SA01\ - --ignore_errors pandas.Index.fillna RT03\ - --ignore_errors pandas.Index.get_indexer PR07,SA01\ - --ignore_errors pandas.Index.get_indexer_for PR01,SA01\ - --ignore_errors pandas.Index.get_indexer_non_unique PR07,SA01\ - --ignore_errors pandas.Index.get_loc PR07,RT03,SA01\ - --ignore_errors pandas.Index.get_slice_bound PR07\ - --ignore_errors pandas.Index.hasnans SA01\ - --ignore_errors pandas.Index.identical PR01,SA01\ - --ignore_errors pandas.Index.inferred_type SA01\ - --ignore_errors pandas.Index.insert PR07,RT03,SA01\ - --ignore_errors pandas.Index.intersection PR07,RT03,SA01\ - --ignore_errors pandas.Index.item SA01\ - --ignore_errors pandas.Index.join PR07,RT03,SA01\ - --ignore_errors pandas.Index.map SA01\ - --ignore_errors pandas.Index.memory_usage RT03\ - --ignore_errors pandas.Index.name SA01\ - --ignore_errors pandas.Index.names GL08\ - --ignore_errors pandas.Index.nbytes SA01\ - --ignore_errors pandas.Index.ndim SA01\ - --ignore_errors pandas.Index.nunique RT03\ - --ignore_errors pandas.Index.putmask PR01,RT03\ - --ignore_errors pandas.Index.ravel PR01,RT03\ - --ignore_errors pandas.Index.reindex PR07\ - --ignore_errors pandas.Index.shape 
SA01\ - --ignore_errors pandas.Index.size SA01\ - --ignore_errors pandas.Index.slice_indexer PR07,RT03,SA01\ - --ignore_errors pandas.Index.slice_locs RT03\ - --ignore_errors pandas.Index.str PR01,SA01\ - --ignore_errors pandas.Index.symmetric_difference PR07,RT03,SA01\ - --ignore_errors pandas.Index.take PR01,PR07\ - --ignore_errors pandas.Index.to_list RT03\ - --ignore_errors pandas.Index.union PR07,RT03,SA01\ - --ignore_errors pandas.Index.unique RT03\ - --ignore_errors pandas.Index.value_counts RT03\ - --ignore_errors pandas.Index.view GL08\ - --ignore_errors pandas.Int16Dtype SA01\ - --ignore_errors pandas.Int32Dtype SA01\ - --ignore_errors pandas.Int64Dtype SA01\ - --ignore_errors pandas.Int8Dtype SA01\ - --ignore_errors pandas.Interval PR02\ - --ignore_errors pandas.Interval.closed SA01\ - --ignore_errors pandas.Interval.left SA01\ - --ignore_errors pandas.Interval.mid SA01\ - --ignore_errors pandas.Interval.right SA01\ - --ignore_errors pandas.IntervalDtype PR01,SA01\ - --ignore_errors pandas.IntervalDtype.subtype SA01\ - --ignore_errors pandas.IntervalIndex.closed SA01\ - --ignore_errors pandas.IntervalIndex.contains RT03\ - --ignore_errors pandas.IntervalIndex.get_indexer PR07,SA01\ - --ignore_errors pandas.IntervalIndex.get_loc PR07,RT03,SA01\ - --ignore_errors pandas.IntervalIndex.is_non_overlapping_monotonic SA01\ - --ignore_errors pandas.IntervalIndex.left GL08\ - --ignore_errors pandas.IntervalIndex.length GL08\ - --ignore_errors pandas.IntervalIndex.mid GL08\ - --ignore_errors pandas.IntervalIndex.right GL08\ - --ignore_errors pandas.IntervalIndex.set_closed RT03,SA01\ - --ignore_errors pandas.IntervalIndex.to_tuples RT03,SA01\ - --ignore_errors pandas.MultiIndex PR01\ - --ignore_errors pandas.MultiIndex.append PR07,SA01\ - --ignore_errors pandas.MultiIndex.copy PR07,RT03,SA01\ - --ignore_errors pandas.MultiIndex.drop PR07,RT03,SA01\ - --ignore_errors pandas.MultiIndex.droplevel RT03,SA01\ - --ignore_errors pandas.MultiIndex.dtypes SA01\ - 
--ignore_errors pandas.MultiIndex.get_indexer PR07,SA01\ - --ignore_errors pandas.MultiIndex.get_level_values SA01\ - --ignore_errors pandas.MultiIndex.get_loc PR07\ - --ignore_errors pandas.MultiIndex.get_loc_level PR07\ - --ignore_errors pandas.MultiIndex.levels SA01\ - --ignore_errors pandas.MultiIndex.levshape SA01\ - --ignore_errors pandas.MultiIndex.names SA01\ - --ignore_errors pandas.MultiIndex.nlevels SA01\ - --ignore_errors pandas.MultiIndex.remove_unused_levels RT03,SA01\ - --ignore_errors pandas.MultiIndex.reorder_levels RT03,SA01\ - --ignore_errors pandas.MultiIndex.set_codes SA01\ - --ignore_errors pandas.MultiIndex.set_levels RT03,SA01\ - --ignore_errors pandas.MultiIndex.sortlevel PR07,SA01\ - --ignore_errors pandas.MultiIndex.to_frame RT03\ - --ignore_errors pandas.MultiIndex.truncate SA01\ - --ignore_errors pandas.NA SA01\ - --ignore_errors pandas.NaT SA01\ - --ignore_errors pandas.NamedAgg SA01\ - --ignore_errors pandas.Period SA01\ - --ignore_errors pandas.Period.asfreq SA01\ - --ignore_errors pandas.Period.freq GL08\ - --ignore_errors pandas.Period.freqstr SA01\ - --ignore_errors pandas.Period.is_leap_year SA01\ - --ignore_errors pandas.Period.month SA01\ - --ignore_errors pandas.Period.now SA01\ - --ignore_errors pandas.Period.ordinal GL08\ - --ignore_errors pandas.Period.quarter SA01\ - --ignore_errors pandas.Period.strftime PR01,SA01\ - --ignore_errors pandas.Period.to_timestamp SA01\ - --ignore_errors pandas.Period.year SA01\ - --ignore_errors pandas.PeriodDtype SA01\ - --ignore_errors pandas.PeriodDtype.freq SA01\ - --ignore_errors pandas.PeriodIndex.day SA01\ - --ignore_errors pandas.PeriodIndex.day_of_week SA01\ - --ignore_errors pandas.PeriodIndex.day_of_year SA01\ - --ignore_errors pandas.PeriodIndex.dayofweek SA01\ - --ignore_errors pandas.PeriodIndex.dayofyear SA01\ - --ignore_errors pandas.PeriodIndex.days_in_month SA01\ - --ignore_errors pandas.PeriodIndex.daysinmonth SA01\ - --ignore_errors pandas.PeriodIndex.freq GL08\ - 
--ignore_errors pandas.PeriodIndex.freqstr SA01\ - --ignore_errors pandas.PeriodIndex.from_fields PR07,SA01\ - --ignore_errors pandas.PeriodIndex.from_ordinals SA01\ - --ignore_errors pandas.PeriodIndex.hour SA01\ - --ignore_errors pandas.PeriodIndex.is_leap_year SA01\ - --ignore_errors pandas.PeriodIndex.minute SA01\ - --ignore_errors pandas.PeriodIndex.month SA01\ - --ignore_errors pandas.PeriodIndex.quarter SA01\ - --ignore_errors pandas.PeriodIndex.qyear GL08\ - --ignore_errors pandas.PeriodIndex.second SA01\ - --ignore_errors pandas.PeriodIndex.to_timestamp RT03,SA01\ - --ignore_errors pandas.PeriodIndex.week SA01\ - --ignore_errors pandas.PeriodIndex.weekday SA01\ - --ignore_errors pandas.PeriodIndex.weekofyear SA01\ - --ignore_errors pandas.PeriodIndex.year SA01\ - --ignore_errors pandas.RangeIndex PR07\ - --ignore_errors pandas.RangeIndex.from_range PR01,SA01\ - --ignore_errors pandas.RangeIndex.start SA01\ - --ignore_errors pandas.RangeIndex.step SA01\ - --ignore_errors pandas.RangeIndex.stop SA01\ - --ignore_errors pandas.Series SA01\ - --ignore_errors pandas.Series.T SA01\ - --ignore_errors pandas.Series.__iter__ RT03,SA01\ - --ignore_errors pandas.Series.add PR07\ - --ignore_errors pandas.Series.align PR07,SA01\ - --ignore_errors pandas.Series.astype RT03\ - --ignore_errors pandas.Series.at_time PR01,RT03\ - --ignore_errors pandas.Series.backfill PR01,SA01\ - --ignore_errors pandas.Series.bfill SA01\ - --ignore_errors pandas.Series.case_when RT03\ - --ignore_errors pandas.Series.cat PR07,SA01\ - --ignore_errors pandas.Series.cat.add_categories PR01,PR02\ - --ignore_errors pandas.Series.cat.as_ordered PR01\ - --ignore_errors pandas.Series.cat.as_unordered PR01\ - --ignore_errors pandas.Series.cat.codes SA01\ - --ignore_errors pandas.Series.cat.ordered SA01\ - --ignore_errors pandas.Series.cat.remove_categories PR01,PR02\ - --ignore_errors pandas.Series.cat.remove_unused_categories PR01\ - --ignore_errors pandas.Series.cat.rename_categories PR01,PR02\ - 
--ignore_errors pandas.Series.cat.reorder_categories PR01,PR02\ - --ignore_errors pandas.Series.cat.set_categories PR01,PR02,RT03\ - --ignore_errors pandas.Series.copy SA01\ - --ignore_errors pandas.Series.div PR07\ - --ignore_errors pandas.Series.droplevel SA01\ - --ignore_errors pandas.Series.dt PR01`# Accessors are implemented as classes, but we do not document the Parameters section` \ - --ignore_errors pandas.Series.dt.as_unit GL08,PR01,PR02\ - --ignore_errors pandas.Series.dt.ceil PR01,PR02,SA01\ - --ignore_errors pandas.Series.dt.components SA01\ - --ignore_errors pandas.Series.dt.date SA01\ - --ignore_errors pandas.Series.dt.day SA01\ - --ignore_errors pandas.Series.dt.day_name PR01,PR02,SA01\ - --ignore_errors pandas.Series.dt.day_of_year SA01\ - --ignore_errors pandas.Series.dt.dayofyear SA01\ - --ignore_errors pandas.Series.dt.days SA01\ - --ignore_errors pandas.Series.dt.days_in_month SA01\ - --ignore_errors pandas.Series.dt.daysinmonth SA01\ - --ignore_errors pandas.Series.dt.floor PR01,PR02,SA01\ - --ignore_errors pandas.Series.dt.freq GL08\ - --ignore_errors pandas.Series.dt.hour SA01\ - --ignore_errors pandas.Series.dt.is_leap_year SA01\ - --ignore_errors pandas.Series.dt.microsecond SA01\ - --ignore_errors pandas.Series.dt.microseconds SA01\ - --ignore_errors pandas.Series.dt.minute SA01\ - --ignore_errors pandas.Series.dt.month SA01\ - --ignore_errors pandas.Series.dt.month_name PR01,PR02,SA01\ - --ignore_errors pandas.Series.dt.nanosecond SA01\ - --ignore_errors pandas.Series.dt.nanoseconds SA01\ - --ignore_errors pandas.Series.dt.normalize PR01\ - --ignore_errors pandas.Series.dt.quarter SA01\ - --ignore_errors pandas.Series.dt.qyear GL08\ - --ignore_errors pandas.Series.dt.round PR01,PR02,SA01\ - --ignore_errors pandas.Series.dt.second SA01\ - --ignore_errors pandas.Series.dt.seconds SA01\ - --ignore_errors pandas.Series.dt.strftime PR01,PR02\ - --ignore_errors pandas.Series.dt.time SA01\ - --ignore_errors pandas.Series.dt.timetz SA01\ - 
--ignore_errors pandas.Series.dt.to_period PR01,PR02,RT03\ - --ignore_errors pandas.Series.dt.total_seconds PR01\ - --ignore_errors pandas.Series.dt.tz SA01\ - --ignore_errors pandas.Series.dt.tz_convert PR01,PR02,RT03\ - --ignore_errors pandas.Series.dt.tz_localize PR01,PR02\ - --ignore_errors pandas.Series.dt.unit GL08\ - --ignore_errors pandas.Series.dt.year SA01\ - --ignore_errors pandas.Series.dtype SA01\ - --ignore_errors pandas.Series.dtypes SA01\ - --ignore_errors pandas.Series.empty GL08\ - --ignore_errors pandas.Series.eq PR07,SA01\ - --ignore_errors pandas.Series.ewm RT03\ - --ignore_errors pandas.Series.expanding RT03\ - --ignore_errors pandas.Series.ffill SA01\ - --ignore_errors pandas.Series.filter RT03\ - --ignore_errors pandas.Series.first_valid_index RT03,SA01\ - --ignore_errors pandas.Series.floordiv PR07\ - --ignore_errors pandas.Series.ge PR07,SA01\ - --ignore_errors pandas.Series.get PR01,PR07,RT03,SA01\ - --ignore_errors pandas.Series.gt PR07,SA01\ - --ignore_errors pandas.Series.hasnans SA01\ - --ignore_errors pandas.Series.infer_objects RT03\ - --ignore_errors pandas.Series.is_monotonic_decreasing SA01\ - --ignore_errors pandas.Series.is_monotonic_increasing SA01\ - --ignore_errors pandas.Series.is_unique SA01\ - --ignore_errors pandas.Series.item SA01\ - --ignore_errors pandas.Series.keys SA01\ - --ignore_errors pandas.Series.kurt RT03,SA01\ - --ignore_errors pandas.Series.kurtosis RT03,SA01\ - --ignore_errors pandas.Series.last_valid_index RT03,SA01\ - --ignore_errors pandas.Series.le PR07,SA01\ - --ignore_errors pandas.Series.list.__getitem__ SA01\ - --ignore_errors pandas.Series.list.flatten SA01\ - --ignore_errors pandas.Series.list.len SA01\ - --ignore_errors pandas.Series.lt PR07,SA01\ - --ignore_errors pandas.Series.mask RT03\ - --ignore_errors pandas.Series.max RT03\ - --ignore_errors pandas.Series.mean RT03,SA01\ - --ignore_errors pandas.Series.median RT03,SA01\ - --ignore_errors pandas.Series.min RT03\ - --ignore_errors 
pandas.Series.mod PR07\ - --ignore_errors pandas.Series.mode SA01\ - --ignore_errors pandas.Series.mul PR07\ - --ignore_errors pandas.Series.nbytes SA01\ - --ignore_errors pandas.Series.ndim SA01\ - --ignore_errors pandas.Series.ne PR07,SA01\ - --ignore_errors pandas.Series.nunique RT03\ - --ignore_errors pandas.Series.pad PR01,SA01\ - --ignore_errors pandas.Series.pipe RT03\ - --ignore_errors pandas.Series.plot PR02,SA01\ - --ignore_errors pandas.Series.plot.box RT03\ - --ignore_errors pandas.Series.plot.density RT03\ - --ignore_errors pandas.Series.plot.kde RT03\ - --ignore_errors pandas.Series.pop RT03,SA01\ - --ignore_errors pandas.Series.pow PR07\ - --ignore_errors pandas.Series.prod RT03\ - --ignore_errors pandas.Series.product RT03\ - --ignore_errors pandas.Series.radd PR07\ - --ignore_errors pandas.Series.rdiv PR07\ - --ignore_errors pandas.Series.reindex RT03\ - --ignore_errors pandas.Series.reorder_levels RT03,SA01\ - --ignore_errors pandas.Series.rfloordiv PR07\ - --ignore_errors pandas.Series.rmod PR07\ - --ignore_errors pandas.Series.rmul PR07\ - --ignore_errors pandas.Series.rolling PR07\ - --ignore_errors pandas.Series.rpow PR07\ - --ignore_errors pandas.Series.rsub PR07\ - --ignore_errors pandas.Series.rtruediv PR07\ - --ignore_errors pandas.Series.sem PR01,RT03,SA01\ - --ignore_errors pandas.Series.shape SA01\ - --ignore_errors pandas.Series.size SA01\ - --ignore_errors pandas.Series.skew RT03,SA01\ - --ignore_errors pandas.Series.sparse PR01,SA01\ - --ignore_errors pandas.Series.sparse.density SA01\ - --ignore_errors pandas.Series.sparse.fill_value SA01\ - --ignore_errors pandas.Series.sparse.from_coo PR07,SA01\ - --ignore_errors pandas.Series.sparse.npoints SA01\ - --ignore_errors pandas.Series.sparse.sp_values SA01\ - --ignore_errors pandas.Series.sparse.to_coo PR07,RT03,SA01\ - --ignore_errors pandas.Series.std PR01,RT03,SA01\ - --ignore_errors pandas.Series.str PR01,SA01\ - --ignore_errors pandas.Series.str.capitalize RT03\ - --ignore_errors 
pandas.Series.str.casefold RT03\ - --ignore_errors pandas.Series.str.center RT03,SA01\ - --ignore_errors pandas.Series.str.decode PR07,RT03,SA01\ - --ignore_errors pandas.Series.str.encode PR07,RT03,SA01\ - --ignore_errors pandas.Series.str.find RT03\ - --ignore_errors pandas.Series.str.fullmatch RT03\ - --ignore_errors pandas.Series.str.get RT03,SA01\ - --ignore_errors pandas.Series.str.index RT03\ - --ignore_errors pandas.Series.str.ljust RT03,SA01\ - --ignore_errors pandas.Series.str.lower RT03\ - --ignore_errors pandas.Series.str.lstrip RT03\ - --ignore_errors pandas.Series.str.match RT03\ - --ignore_errors pandas.Series.str.normalize RT03,SA01\ - --ignore_errors pandas.Series.str.partition RT03\ - --ignore_errors pandas.Series.str.repeat SA01\ - --ignore_errors pandas.Series.str.replace SA01\ - --ignore_errors pandas.Series.str.rfind RT03\ - --ignore_errors pandas.Series.str.rindex RT03\ - --ignore_errors pandas.Series.str.rjust RT03,SA01\ - --ignore_errors pandas.Series.str.rpartition RT03\ - --ignore_errors pandas.Series.str.rstrip RT03\ - --ignore_errors pandas.Series.str.strip RT03\ - --ignore_errors pandas.Series.str.swapcase RT03\ - --ignore_errors pandas.Series.str.title RT03\ - --ignore_errors pandas.Series.str.translate RT03,SA01\ - --ignore_errors pandas.Series.str.upper RT03\ - --ignore_errors pandas.Series.str.wrap PR01,RT03,SA01\ - --ignore_errors pandas.Series.str.zfill RT03\ - --ignore_errors pandas.Series.struct.dtypes SA01\ - --ignore_errors pandas.Series.sub PR07\ - --ignore_errors pandas.Series.sum RT03\ - --ignore_errors pandas.Series.swaplevel SA01\ - --ignore_errors pandas.Series.to_dict SA01\ - --ignore_errors pandas.Series.to_frame SA01\ - --ignore_errors pandas.Series.to_hdf PR07\ - --ignore_errors pandas.Series.to_list RT03\ - --ignore_errors pandas.Series.to_markdown SA01\ - --ignore_errors pandas.Series.to_numpy RT03\ - --ignore_errors pandas.Series.to_period SA01\ - --ignore_errors pandas.Series.to_string SA01\ - --ignore_errors 
pandas.Series.to_timestamp RT03,SA01\ - --ignore_errors pandas.Series.truediv PR07\ - --ignore_errors pandas.Series.tz_convert SA01\ - --ignore_errors pandas.Series.tz_localize SA01\ - --ignore_errors pandas.Series.unstack SA01\ - --ignore_errors pandas.Series.update PR07,SA01\ - --ignore_errors pandas.Series.value_counts RT03\ - --ignore_errors pandas.Series.var PR01,RT03,SA01\ - --ignore_errors pandas.Series.where RT03\ - --ignore_errors pandas.SparseDtype SA01\ - --ignore_errors pandas.Timedelta PR07,SA01\ - --ignore_errors pandas.Timedelta.as_unit SA01\ - --ignore_errors pandas.Timedelta.asm8 SA01\ - --ignore_errors pandas.Timedelta.ceil SA01\ - --ignore_errors pandas.Timedelta.components SA01\ - --ignore_errors pandas.Timedelta.days SA01\ - --ignore_errors pandas.Timedelta.floor SA01\ - --ignore_errors pandas.Timedelta.max PR02,PR07,SA01\ - --ignore_errors pandas.Timedelta.min PR02,PR07,SA01\ - --ignore_errors pandas.Timedelta.resolution PR02,PR07,SA01\ - --ignore_errors pandas.Timedelta.round SA01\ - --ignore_errors pandas.Timedelta.to_numpy PR01\ - --ignore_errors pandas.Timedelta.to_timedelta64 SA01\ - --ignore_errors pandas.Timedelta.total_seconds SA01\ - --ignore_errors pandas.Timedelta.view SA01\ - --ignore_errors pandas.TimedeltaIndex PR01\ - --ignore_errors pandas.TimedeltaIndex.as_unit RT03,SA01\ - --ignore_errors pandas.TimedeltaIndex.ceil SA01\ - --ignore_errors pandas.TimedeltaIndex.components SA01\ - --ignore_errors pandas.TimedeltaIndex.days SA01\ - --ignore_errors pandas.TimedeltaIndex.floor SA01\ - --ignore_errors pandas.TimedeltaIndex.inferred_freq SA01\ - --ignore_errors pandas.TimedeltaIndex.mean PR07\ - --ignore_errors pandas.TimedeltaIndex.microseconds SA01\ - --ignore_errors pandas.TimedeltaIndex.nanoseconds SA01\ - --ignore_errors pandas.TimedeltaIndex.round SA01\ - --ignore_errors pandas.TimedeltaIndex.seconds SA01\ - --ignore_errors pandas.TimedeltaIndex.to_pytimedelta RT03,SA01\ - --ignore_errors pandas.Timestamp PR07,SA01\ - 
--ignore_errors pandas.Timestamp.as_unit SA01\ - --ignore_errors pandas.Timestamp.asm8 SA01\ - --ignore_errors pandas.Timestamp.astimezone SA01\ - --ignore_errors pandas.Timestamp.ceil SA01\ - --ignore_errors pandas.Timestamp.combine PR01,SA01\ - --ignore_errors pandas.Timestamp.ctime SA01\ - --ignore_errors pandas.Timestamp.date SA01\ - --ignore_errors pandas.Timestamp.day GL08\ - --ignore_errors pandas.Timestamp.day_name SA01\ - --ignore_errors pandas.Timestamp.day_of_week SA01\ - --ignore_errors pandas.Timestamp.day_of_year SA01\ - --ignore_errors pandas.Timestamp.dayofweek SA01\ - --ignore_errors pandas.Timestamp.dayofyear SA01\ - --ignore_errors pandas.Timestamp.days_in_month SA01\ - --ignore_errors pandas.Timestamp.daysinmonth SA01\ - --ignore_errors pandas.Timestamp.dst SA01\ - --ignore_errors pandas.Timestamp.floor SA01\ - --ignore_errors pandas.Timestamp.fold GL08\ - --ignore_errors pandas.Timestamp.fromordinal SA01\ - --ignore_errors pandas.Timestamp.fromtimestamp PR01,SA01\ - --ignore_errors pandas.Timestamp.hour GL08\ - --ignore_errors pandas.Timestamp.is_leap_year SA01\ - --ignore_errors pandas.Timestamp.isocalendar SA01\ - --ignore_errors pandas.Timestamp.isoformat SA01\ - --ignore_errors pandas.Timestamp.isoweekday SA01\ - --ignore_errors pandas.Timestamp.max PR02,PR07,SA01\ - --ignore_errors pandas.Timestamp.microsecond GL08\ - --ignore_errors pandas.Timestamp.min PR02,PR07,SA01\ - --ignore_errors pandas.Timestamp.minute GL08\ - --ignore_errors pandas.Timestamp.month GL08\ - --ignore_errors pandas.Timestamp.month_name SA01\ - --ignore_errors pandas.Timestamp.nanosecond GL08\ - --ignore_errors pandas.Timestamp.normalize SA01\ - --ignore_errors pandas.Timestamp.now SA01\ - --ignore_errors pandas.Timestamp.quarter SA01\ - --ignore_errors pandas.Timestamp.replace PR07,SA01\ - --ignore_errors pandas.Timestamp.resolution PR02,PR07,SA01\ - --ignore_errors pandas.Timestamp.round SA01\ - --ignore_errors pandas.Timestamp.second GL08\ - --ignore_errors 
pandas.Timestamp.strftime SA01\ - --ignore_errors pandas.Timestamp.strptime PR01,SA01\ - --ignore_errors pandas.Timestamp.time SA01\ - --ignore_errors pandas.Timestamp.timestamp SA01\ - --ignore_errors pandas.Timestamp.timetuple SA01\ - --ignore_errors pandas.Timestamp.timetz SA01\ - --ignore_errors pandas.Timestamp.to_datetime64 SA01\ - --ignore_errors pandas.Timestamp.to_julian_date SA01\ - --ignore_errors pandas.Timestamp.to_numpy PR01\ - --ignore_errors pandas.Timestamp.to_period PR01,SA01\ - --ignore_errors pandas.Timestamp.to_pydatetime PR01,SA01\ - --ignore_errors pandas.Timestamp.today SA01\ - --ignore_errors pandas.Timestamp.toordinal SA01\ - --ignore_errors pandas.Timestamp.tz SA01\ - --ignore_errors pandas.Timestamp.tz_convert SA01\ - --ignore_errors pandas.Timestamp.tz_localize SA01\ - --ignore_errors pandas.Timestamp.tzinfo GL08\ - --ignore_errors pandas.Timestamp.tzname SA01\ - --ignore_errors pandas.Timestamp.unit SA01\ - --ignore_errors pandas.Timestamp.utcfromtimestamp PR01,SA01\ - --ignore_errors pandas.Timestamp.utcnow SA01\ - --ignore_errors pandas.Timestamp.utcoffset SA01\ - --ignore_errors pandas.Timestamp.utctimetuple SA01\ - --ignore_errors pandas.Timestamp.value GL08\ - --ignore_errors pandas.Timestamp.week SA01\ - --ignore_errors pandas.Timestamp.weekday SA01\ - --ignore_errors pandas.Timestamp.weekofyear SA01\ - --ignore_errors pandas.Timestamp.year GL08\ - --ignore_errors pandas.UInt16Dtype SA01\ - --ignore_errors pandas.UInt32Dtype SA01\ - --ignore_errors pandas.UInt64Dtype SA01\ - --ignore_errors pandas.UInt8Dtype SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._accumulate RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._concat_same_type PR07,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._formatter SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._from_sequence SA01\ - --ignore_errors 
pandas.api.extensions.ExtensionArray._from_sequence_of_strings SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._hash_pandas_object RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._pad_or_backfill PR01,RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._reduce RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray._values_for_factorize SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.astype SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.copy RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.dropna RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.dtype SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.duplicated RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.equals SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.fillna SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.insert PR07,RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.interpolate PR01,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.isin PR07,RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.isna SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.nbytes SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.ndim SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.ravel RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.shape SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.shift SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.take RT03\ - --ignore_errors pandas.api.extensions.ExtensionArray.tolist RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.unique RT03,SA01\ - --ignore_errors pandas.api.extensions.ExtensionArray.view SA01\ - --ignore_errors pandas.api.extensions.register_extension_dtype SA01\ - --ignore_errors pandas.api.indexers.BaseIndexer PR01,SA01\ - --ignore_errors 
pandas.api.indexers.FixedForwardWindowIndexer PR01,SA01\ - --ignore_errors pandas.api.indexers.VariableOffsetWindowIndexer PR01,SA01\ - --ignore_errors pandas.api.interchange.from_dataframe RT03,SA01\ - --ignore_errors pandas.api.types.infer_dtype PR07,SA01\ - --ignore_errors pandas.api.types.is_any_real_numeric_dtype SA01\ - --ignore_errors pandas.api.types.is_bool PR01,SA01\ - --ignore_errors pandas.api.types.is_bool_dtype SA01\ - --ignore_errors pandas.api.types.is_categorical_dtype SA01\ - --ignore_errors pandas.api.types.is_complex PR01,SA01\ - --ignore_errors pandas.api.types.is_complex_dtype SA01\ - --ignore_errors pandas.api.types.is_datetime64_any_dtype SA01\ - --ignore_errors pandas.api.types.is_datetime64_dtype SA01\ - --ignore_errors pandas.api.types.is_datetime64_ns_dtype SA01\ - --ignore_errors pandas.api.types.is_datetime64tz_dtype SA01\ - --ignore_errors pandas.api.types.is_dict_like PR07,SA01\ - --ignore_errors pandas.api.types.is_extension_array_dtype SA01\ - --ignore_errors pandas.api.types.is_file_like PR07,SA01\ - --ignore_errors pandas.api.types.is_float PR01,SA01\ - --ignore_errors pandas.api.types.is_float_dtype SA01\ - --ignore_errors pandas.api.types.is_hashable PR01,RT03,SA01\ - --ignore_errors pandas.api.types.is_int64_dtype SA01\ - --ignore_errors pandas.api.types.is_integer PR01,SA01\ - --ignore_errors pandas.api.types.is_integer_dtype SA01\ - --ignore_errors pandas.api.types.is_interval_dtype SA01\ - --ignore_errors pandas.api.types.is_iterator PR07,SA01\ - --ignore_errors pandas.api.types.is_list_like SA01\ - --ignore_errors pandas.api.types.is_named_tuple PR07,SA01\ - --ignore_errors pandas.api.types.is_numeric_dtype SA01\ - --ignore_errors pandas.api.types.is_object_dtype SA01\ - --ignore_errors pandas.api.types.is_period_dtype SA01\ - --ignore_errors pandas.api.types.is_re PR07,SA01\ - --ignore_errors pandas.api.types.is_re_compilable PR07,SA01\ - --ignore_errors pandas.api.types.is_scalar SA01\ - --ignore_errors 
pandas.api.types.is_signed_integer_dtype SA01\ - --ignore_errors pandas.api.types.is_sparse SA01\ - --ignore_errors pandas.api.types.is_string_dtype SA01\ - --ignore_errors pandas.api.types.is_timedelta64_dtype SA01\ - --ignore_errors pandas.api.types.is_timedelta64_ns_dtype SA01\ - --ignore_errors pandas.api.types.is_unsigned_integer_dtype SA01\ - --ignore_errors pandas.api.types.pandas_dtype PR07,RT03,SA01\ - --ignore_errors pandas.api.types.union_categoricals RT03,SA01\ - --ignore_errors pandas.arrays.ArrowExtensionArray PR07,SA01\ - --ignore_errors pandas.arrays.BooleanArray SA01\ - --ignore_errors pandas.arrays.DatetimeArray SA01\ - --ignore_errors pandas.arrays.FloatingArray SA01\ - --ignore_errors pandas.arrays.IntegerArray SA01\ - --ignore_errors pandas.arrays.IntervalArray.closed SA01\ - --ignore_errors pandas.arrays.IntervalArray.contains RT03\ - --ignore_errors pandas.arrays.IntervalArray.is_non_overlapping_monotonic SA01\ - --ignore_errors pandas.arrays.IntervalArray.left SA01\ - --ignore_errors pandas.arrays.IntervalArray.length SA01\ - --ignore_errors pandas.arrays.IntervalArray.mid SA01\ - --ignore_errors pandas.arrays.IntervalArray.right SA01\ - --ignore_errors pandas.arrays.IntervalArray.set_closed RT03,SA01\ - --ignore_errors pandas.arrays.IntervalArray.to_tuples RT03,SA01\ - --ignore_errors pandas.arrays.NumpyExtensionArray SA01\ - --ignore_errors pandas.arrays.SparseArray PR07,SA01\ - --ignore_errors pandas.arrays.TimedeltaArray PR07,SA01\ - --ignore_errors pandas.bdate_range RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.__iter__ RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.agg RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.aggregate RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.apply RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.cummax RT03\ - --ignore_errors 
pandas.core.groupby.DataFrameGroupBy.cummin RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.cumprod RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.cumsum RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.filter RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.groups SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.hist RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.indices SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.max SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.mean RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.median SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.min SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.nth PR02\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.nunique RT03,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.ohlc SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.plot PR02,SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.prod SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.rank RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.resample RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.sem SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.skew RT03\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.sum SA01\ - --ignore_errors pandas.core.groupby.DataFrameGroupBy.transform RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.__iter__ RT03,SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.agg RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.aggregate RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.apply RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.cummax RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.cummin RT03\ - --ignore_errors 
pandas.core.groupby.SeriesGroupBy.cumprod RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.cumsum RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.filter PR01,RT03,SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.get_group RT03,SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.groups SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.indices SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.max SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.mean RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.median SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.min SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.nth PR02\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.nunique SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.ohlc SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.plot PR02,SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.prod SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.rank RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.resample RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.sem SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.skew RT03\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.sum SA01\ - --ignore_errors pandas.core.groupby.SeriesGroupBy.transform RT03\ - --ignore_errors pandas.core.resample.Resampler.__iter__ RT03,SA01\ - --ignore_errors pandas.core.resample.Resampler.ffill RT03\ - --ignore_errors pandas.core.resample.Resampler.get_group RT03,SA01\ - --ignore_errors pandas.core.resample.Resampler.groups SA01\ - --ignore_errors pandas.core.resample.Resampler.indices SA01\ - --ignore_errors pandas.core.resample.Resampler.max PR01,RT03,SA01\ - --ignore_errors pandas.core.resample.Resampler.mean SA01\ - --ignore_errors 
pandas.core.resample.Resampler.median SA01\ - --ignore_errors pandas.core.resample.Resampler.min PR01,RT03,SA01\ - --ignore_errors pandas.core.resample.Resampler.nunique SA01\ - --ignore_errors pandas.core.resample.Resampler.ohlc SA01\ - --ignore_errors pandas.core.resample.Resampler.prod SA01\ - --ignore_errors pandas.core.resample.Resampler.quantile PR01,PR07\ - --ignore_errors pandas.core.resample.Resampler.sem SA01\ - --ignore_errors pandas.core.resample.Resampler.std SA01\ - --ignore_errors pandas.core.resample.Resampler.sum SA01\ - --ignore_errors pandas.core.resample.Resampler.transform PR01,RT03,SA01\ - --ignore_errors pandas.core.resample.Resampler.var SA01\ - --ignore_errors pandas.core.window.expanding.Expanding.corr PR01\ - --ignore_errors pandas.core.window.expanding.Expanding.count PR01\ - --ignore_errors pandas.core.window.rolling.Rolling.max PR01\ - --ignore_errors pandas.core.window.rolling.Window.std PR01\ - --ignore_errors pandas.core.window.rolling.Window.var PR01\ - --ignore_errors pandas.date_range RT03\ - --ignore_errors pandas.describe_option SA01\ - --ignore_errors pandas.errors.AbstractMethodError PR01,SA01\ - --ignore_errors pandas.errors.AttributeConflictWarning SA01\ - --ignore_errors pandas.errors.CSSWarning SA01\ - --ignore_errors pandas.errors.CategoricalConversionWarning SA01\ - --ignore_errors pandas.errors.ChainedAssignmentError SA01\ - --ignore_errors pandas.errors.ClosedFileError SA01\ - --ignore_errors pandas.errors.DataError SA01\ - --ignore_errors pandas.errors.DuplicateLabelError SA01\ - --ignore_errors pandas.errors.EmptyDataError SA01\ - --ignore_errors pandas.errors.IntCastingNaNError SA01\ - --ignore_errors pandas.errors.InvalidIndexError SA01\ - --ignore_errors pandas.errors.InvalidVersion SA01\ - --ignore_errors pandas.errors.MergeError SA01\ - --ignore_errors pandas.errors.NullFrequencyError SA01\ - --ignore_errors pandas.errors.NumExprClobberingError SA01\ - --ignore_errors pandas.errors.NumbaUtilError SA01\ - 
--ignore_errors pandas.errors.OptionError SA01\ - --ignore_errors pandas.errors.OutOfBoundsDatetime SA01\ - --ignore_errors pandas.errors.OutOfBoundsTimedelta SA01\ - --ignore_errors pandas.errors.PerformanceWarning SA01\ - --ignore_errors pandas.errors.PossibleDataLossError SA01\ - --ignore_errors pandas.errors.PossiblePrecisionLoss SA01\ - --ignore_errors pandas.errors.SpecificationError SA01\ - --ignore_errors pandas.errors.UndefinedVariableError PR01,SA01\ - --ignore_errors pandas.errors.UnsortedIndexError SA01\ - --ignore_errors pandas.errors.UnsupportedFunctionCall SA01\ - --ignore_errors pandas.errors.ValueLabelTypeMismatch SA01\ - --ignore_errors pandas.get_option PR01,SA01\ - --ignore_errors pandas.infer_freq SA01\ - --ignore_errors pandas.interval_range RT03\ - --ignore_errors pandas.io.formats.style.Styler.apply RT03\ - --ignore_errors pandas.io.formats.style.Styler.apply_index RT03\ - --ignore_errors pandas.io.formats.style.Styler.background_gradient RT03\ - --ignore_errors pandas.io.formats.style.Styler.bar RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.clear SA01\ - --ignore_errors pandas.io.formats.style.Styler.concat RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.export RT03\ - --ignore_errors pandas.io.formats.style.Styler.format RT03\ - --ignore_errors pandas.io.formats.style.Styler.format_index RT03\ - --ignore_errors pandas.io.formats.style.Styler.from_custom_template SA01\ - --ignore_errors pandas.io.formats.style.Styler.hide RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.highlight_between RT03\ - --ignore_errors pandas.io.formats.style.Styler.highlight_max RT03\ - --ignore_errors pandas.io.formats.style.Styler.highlight_min RT03\ - --ignore_errors pandas.io.formats.style.Styler.highlight_null RT03\ - --ignore_errors pandas.io.formats.style.Styler.highlight_quantile RT03\ - --ignore_errors pandas.io.formats.style.Styler.map RT03\ - --ignore_errors pandas.io.formats.style.Styler.map_index RT03\ - 
--ignore_errors pandas.io.formats.style.Styler.relabel_index RT03\ - --ignore_errors pandas.io.formats.style.Styler.set_caption RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.set_properties RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.set_sticky RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.set_table_attributes PR07,RT03\ - --ignore_errors pandas.io.formats.style.Styler.set_table_styles RT03\ - --ignore_errors pandas.io.formats.style.Styler.set_td_classes RT03\ - --ignore_errors pandas.io.formats.style.Styler.set_tooltips RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.set_uuid PR07,RT03,SA01\ - --ignore_errors pandas.io.formats.style.Styler.text_gradient RT03\ - --ignore_errors pandas.io.formats.style.Styler.to_excel PR01\ - --ignore_errors pandas.io.formats.style.Styler.to_string SA01\ - --ignore_errors pandas.io.formats.style.Styler.use RT03\ - --ignore_errors pandas.io.json.build_table_schema PR07,RT03,SA01\ - --ignore_errors pandas.io.stata.StataReader.data_label SA01\ - --ignore_errors pandas.io.stata.StataReader.value_labels RT03,SA01\ - --ignore_errors pandas.io.stata.StataReader.variable_labels RT03,SA01\ - --ignore_errors pandas.io.stata.StataWriter.write_file SA01\ - --ignore_errors pandas.json_normalize RT03,SA01\ - --ignore_errors pandas.merge PR07\ - --ignore_errors pandas.merge_asof PR07,RT03\ - --ignore_errors pandas.merge_ordered PR07\ - --ignore_errors pandas.option_context SA01\ - --ignore_errors pandas.period_range RT03,SA01\ - --ignore_errors pandas.pivot PR07\ - --ignore_errors pandas.pivot_table PR07\ - --ignore_errors pandas.plotting.andrews_curves RT03,SA01\ - --ignore_errors pandas.plotting.autocorrelation_plot RT03,SA01\ - --ignore_errors pandas.plotting.lag_plot RT03,SA01\ - --ignore_errors pandas.plotting.parallel_coordinates PR07,RT03,SA01\ - --ignore_errors pandas.plotting.plot_params SA01\ - --ignore_errors pandas.plotting.radviz RT03\ - --ignore_errors 
pandas.plotting.scatter_matrix PR07,SA01\ - --ignore_errors pandas.plotting.table PR07,RT03,SA01\ - --ignore_errors pandas.qcut PR07,SA01\ - --ignore_errors pandas.read_feather SA01\ - --ignore_errors pandas.read_orc SA01\ - --ignore_errors pandas.read_sas SA01\ - --ignore_errors pandas.read_spss SA01\ - --ignore_errors pandas.reset_option SA01\ - --ignore_errors pandas.set_eng_float_format RT03,SA01\ - --ignore_errors pandas.set_option SA01\ - --ignore_errors pandas.show_versions SA01\ - --ignore_errors pandas.test SA01\ - --ignore_errors pandas.testing.assert_extension_array_equal SA01\ - --ignore_errors pandas.testing.assert_index_equal PR07,SA01\ - --ignore_errors pandas.testing.assert_series_equal PR07,SA01\ - --ignore_errors pandas.timedelta_range SA01\ - --ignore_errors pandas.tseries.api.guess_datetime_format SA01\ - --ignore_errors pandas.tseries.offsets.BDay PR02,SA01\ - --ignore_errors pandas.tseries.offsets.BMonthBegin PR02\ - --ignore_errors pandas.tseries.offsets.BMonthEnd PR02\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin PR02\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterBegin.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd PR02\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.freqstr SA01\ - --ignore_errors 
pandas.tseries.offsets.BQuarterEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BQuarterEnd.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin PR02\ - --ignore_errors pandas.tseries.offsets.BYearBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.BYearBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BYearBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BYearBegin.month GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.BYearBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BYearBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd PR02\ - --ignore_errors pandas.tseries.offsets.BYearEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.BYearEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BYearEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BYearEnd.month GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.BYearEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BYearEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay PR02,SA01\ - --ignore_errors 
pandas.tseries.offsets.BusinessDay.calendar GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.copy SA01\ - --ignore_errors pandas.tseries.offsets.BusinessDay.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BusinessDay.holidays GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BusinessDay.n GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.name SA01\ - --ignore_errors pandas.tseries.offsets.BusinessDay.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BusinessDay.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour PR02,SA01\ - --ignore_errors pandas.tseries.offsets.BusinessHour.calendar GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.copy SA01\ - --ignore_errors pandas.tseries.offsets.BusinessHour.end GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BusinessHour.holidays GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BusinessHour.n GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.name SA01\ - --ignore_errors pandas.tseries.offsets.BusinessHour.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.start GL08\ - --ignore_errors pandas.tseries.offsets.BusinessHour.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin PR02\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.freqstr SA01\ - 
--ignore_errors pandas.tseries.offsets.BusinessMonthBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd PR02\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.BusinessMonthEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.CBMonthBegin PR02\ - --ignore_errors pandas.tseries.offsets.CBMonthEnd PR02\ - --ignore_errors pandas.tseries.offsets.CDay PR02,SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay PR02,SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.calendar GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.copy SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.holidays GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.kwds SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.n GL08\ - --ignore_errors 
pandas.tseries.offsets.CustomBusinessDay.name SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.nanos GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.normalize GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessDay.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour PR02,SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.calendar GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.copy SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.end GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.holidays GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.kwds SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.n GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.name SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.nanos GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.normalize GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.start GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessHour.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin PR02\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.calendar GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.holidays GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.is_on_offset SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.kwds SA01\ - 
--ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.m_offset GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthBegin.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd PR02\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.calendar GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.holidays GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.is_on_offset SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.m_offset GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.CustomBusinessMonthEnd.weekmask GL08\ - --ignore_errors pandas.tseries.offsets.DateOffset PR02\ - --ignore_errors pandas.tseries.offsets.DateOffset.copy SA01\ - --ignore_errors pandas.tseries.offsets.DateOffset.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.DateOffset.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.DateOffset.kwds SA01\ - --ignore_errors 
pandas.tseries.offsets.DateOffset.n GL08\ - --ignore_errors pandas.tseries.offsets.DateOffset.name SA01\ - --ignore_errors pandas.tseries.offsets.DateOffset.nanos GL08\ - --ignore_errors pandas.tseries.offsets.DateOffset.normalize GL08\ - --ignore_errors pandas.tseries.offsets.DateOffset.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Day PR02\ - --ignore_errors pandas.tseries.offsets.Day.copy SA01\ - --ignore_errors pandas.tseries.offsets.Day.delta GL08\ - --ignore_errors pandas.tseries.offsets.Day.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Day.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Day.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Day.n GL08\ - --ignore_errors pandas.tseries.offsets.Day.name SA01\ - --ignore_errors pandas.tseries.offsets.Day.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Day.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Day.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Easter PR02\ - --ignore_errors pandas.tseries.offsets.Easter.copy SA01\ - --ignore_errors pandas.tseries.offsets.Easter.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Easter.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Easter.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Easter.n GL08\ - --ignore_errors pandas.tseries.offsets.Easter.name SA01\ - --ignore_errors pandas.tseries.offsets.Easter.nanos GL08\ - --ignore_errors pandas.tseries.offsets.Easter.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Easter.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.FY5253 PR02\ - --ignore_errors pandas.tseries.offsets.FY5253.copy SA01\ - --ignore_errors pandas.tseries.offsets.FY5253.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.FY5253.get_rule_code_suffix GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.get_year_end GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.kwds SA01\ - 
--ignore_errors pandas.tseries.offsets.FY5253.n GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.name SA01\ - --ignore_errors pandas.tseries.offsets.FY5253.nanos GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.normalize GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.variation GL08\ - --ignore_errors pandas.tseries.offsets.FY5253.weekday GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter PR02\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.copy SA01\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.get_rule_code_suffix GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.get_weeks GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.kwds SA01\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.n GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.name SA01\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.nanos GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.normalize GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.qtr_with_extra_week GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.variation GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.weekday GL08\ - --ignore_errors pandas.tseries.offsets.FY5253Quarter.year_has_extra_week GL08\ - --ignore_errors pandas.tseries.offsets.Hour PR02\ - --ignore_errors pandas.tseries.offsets.Hour.copy SA01\ - --ignore_errors pandas.tseries.offsets.Hour.delta GL08\ - --ignore_errors pandas.tseries.offsets.Hour.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Hour.is_on_offset GL08\ - 
--ignore_errors pandas.tseries.offsets.Hour.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Hour.n GL08\ - --ignore_errors pandas.tseries.offsets.Hour.name SA01\ - --ignore_errors pandas.tseries.offsets.Hour.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Hour.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Hour.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth PR02,SA01\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.copy SA01\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.kwds SA01\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.n GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.name SA01\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.nanos GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.normalize GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.week GL08\ - --ignore_errors pandas.tseries.offsets.LastWeekOfMonth.weekday GL08\ - --ignore_errors pandas.tseries.offsets.Micro PR02\ - --ignore_errors pandas.tseries.offsets.Micro.copy SA01\ - --ignore_errors pandas.tseries.offsets.Micro.delta GL08\ - --ignore_errors pandas.tseries.offsets.Micro.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Micro.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Micro.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Micro.n GL08\ - --ignore_errors pandas.tseries.offsets.Micro.name SA01\ - --ignore_errors pandas.tseries.offsets.Micro.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Micro.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Micro.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Milli PR02\ - --ignore_errors pandas.tseries.offsets.Milli.copy SA01\ - --ignore_errors 
pandas.tseries.offsets.Milli.delta GL08\ - --ignore_errors pandas.tseries.offsets.Milli.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Milli.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Milli.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Milli.n GL08\ - --ignore_errors pandas.tseries.offsets.Milli.name SA01\ - --ignore_errors pandas.tseries.offsets.Milli.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Milli.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Milli.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Minute PR02\ - --ignore_errors pandas.tseries.offsets.Minute.copy SA01\ - --ignore_errors pandas.tseries.offsets.Minute.delta GL08\ - --ignore_errors pandas.tseries.offsets.Minute.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Minute.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Minute.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Minute.n GL08\ - --ignore_errors pandas.tseries.offsets.Minute.name SA01\ - --ignore_errors pandas.tseries.offsets.Minute.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Minute.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Minute.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.MonthBegin PR02\ - --ignore_errors pandas.tseries.offsets.MonthBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.MonthBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.MonthBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.MonthBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.MonthBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.MonthBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.MonthBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.MonthBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.MonthBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.MonthEnd PR02\ - --ignore_errors pandas.tseries.offsets.MonthEnd.copy SA01\ - --ignore_errors 
pandas.tseries.offsets.MonthEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.MonthEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.MonthEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.MonthEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.MonthEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.MonthEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.MonthEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.MonthEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Nano PR02\ - --ignore_errors pandas.tseries.offsets.Nano.copy SA01\ - --ignore_errors pandas.tseries.offsets.Nano.delta GL08\ - --ignore_errors pandas.tseries.offsets.Nano.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Nano.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Nano.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Nano.n GL08\ - --ignore_errors pandas.tseries.offsets.Nano.name SA01\ - --ignore_errors pandas.tseries.offsets.Nano.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Nano.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Nano.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin PR02\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.QuarterBegin.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd PR02\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.copy SA01\ - 
--ignore_errors pandas.tseries.offsets.QuarterEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.QuarterEnd.startingMonth GL08\ - --ignore_errors pandas.tseries.offsets.Second PR02\ - --ignore_errors pandas.tseries.offsets.Second.copy SA01\ - --ignore_errors pandas.tseries.offsets.Second.delta GL08\ - --ignore_errors pandas.tseries.offsets.Second.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Second.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Second.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Second.n GL08\ - --ignore_errors pandas.tseries.offsets.Second.name SA01\ - --ignore_errors pandas.tseries.offsets.Second.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Second.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Second.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin PR02,SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.day_of_month GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthBegin.normalize GL08\ - --ignore_errors 
pandas.tseries.offsets.SemiMonthBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd PR02,SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.day_of_month GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.SemiMonthEnd.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Tick GL08\ - --ignore_errors pandas.tseries.offsets.Tick.copy SA01\ - --ignore_errors pandas.tseries.offsets.Tick.delta GL08\ - --ignore_errors pandas.tseries.offsets.Tick.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Tick.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Tick.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Tick.n GL08\ - --ignore_errors pandas.tseries.offsets.Tick.name SA01\ - --ignore_errors pandas.tseries.offsets.Tick.nanos SA01\ - --ignore_errors pandas.tseries.offsets.Tick.normalize GL08\ - --ignore_errors pandas.tseries.offsets.Tick.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Week PR02\ - --ignore_errors pandas.tseries.offsets.Week.copy SA01\ - --ignore_errors pandas.tseries.offsets.Week.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.Week.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.Week.kwds SA01\ - --ignore_errors pandas.tseries.offsets.Week.n GL08\ - --ignore_errors pandas.tseries.offsets.Week.name SA01\ - --ignore_errors pandas.tseries.offsets.Week.nanos GL08\ - --ignore_errors pandas.tseries.offsets.Week.normalize GL08\ - --ignore_errors 
pandas.tseries.offsets.Week.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.Week.weekday GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth PR02,SA01\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.copy SA01\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.kwds SA01\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.n GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.name SA01\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.nanos GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.normalize GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.week GL08\ - --ignore_errors pandas.tseries.offsets.WeekOfMonth.weekday GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin PR02\ - --ignore_errors pandas.tseries.offsets.YearBegin.copy SA01\ - --ignore_errors pandas.tseries.offsets.YearBegin.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.YearBegin.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin.kwds SA01\ - --ignore_errors pandas.tseries.offsets.YearBegin.month GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin.n GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin.name SA01\ - --ignore_errors pandas.tseries.offsets.YearBegin.nanos GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin.normalize GL08\ - --ignore_errors pandas.tseries.offsets.YearBegin.rule_code GL08\ - --ignore_errors pandas.tseries.offsets.YearEnd PR02\ - --ignore_errors pandas.tseries.offsets.YearEnd.copy SA01\ - --ignore_errors pandas.tseries.offsets.YearEnd.freqstr SA01\ - --ignore_errors pandas.tseries.offsets.YearEnd.is_on_offset GL08\ - --ignore_errors pandas.tseries.offsets.YearEnd.kwds SA01\ - --ignore_errors pandas.tseries.offsets.YearEnd.month GL08\ - --ignore_errors 
pandas.tseries.offsets.YearEnd.n GL08\ - --ignore_errors pandas.tseries.offsets.YearEnd.name SA01\ - --ignore_errors pandas.tseries.offsets.YearEnd.nanos GL08\ - --ignore_errors pandas.tseries.offsets.YearEnd.normalize GL08\ - --ignore_errors pandas.tseries.offsets.YearEnd.rule_code GL08\ - --ignore_errors pandas.unique PR07\ - --ignore_errors pandas.util.hash_array PR07,SA01\ - --ignore_errors pandas.util.hash_pandas_object PR07,SA01 # There should be no backslash in the final line, please keep this comment in the last ignored function - ) - $BASE_DIR/scripts/validate_docstrings.py ${PARAMETERS[@]} - RET=$(($RET + $?)) ; + MSG='Validate Docstrings' ; echo $MSG + $BASE_DIR/scripts/validate_docstrings.py \ + --format=actions \ + -i '*' ES01 `# For now it is ok if docstrings are missing the extended summary` \ + -i pandas.Series.dt PR01 `# Accessors are implemented as classes, but we do not document the Parameters section` \ + -i pandas.Categorical.__array__ SA01\ + -i pandas.Categorical.codes SA01\ + -i pandas.Categorical.dtype SA01\ + -i pandas.Categorical.from_codes SA01\ + -i pandas.Categorical.ordered SA01\ + -i pandas.CategoricalDtype.categories SA01\ + -i pandas.CategoricalDtype.ordered SA01\ + -i pandas.CategoricalIndex.codes SA01\ + -i pandas.CategoricalIndex.ordered SA01\ + -i pandas.DataFrame.__dataframe__ SA01\ + -i pandas.DataFrame.__iter__ SA01\ + -i pandas.DataFrame.assign SA01\ + -i pandas.DataFrame.at_time PR01\ + -i pandas.DataFrame.axes SA01\ + -i pandas.DataFrame.backfill PR01,SA01\ + -i pandas.DataFrame.bfill SA01\ + -i pandas.DataFrame.columns SA01\ + -i pandas.DataFrame.copy SA01\ + -i pandas.DataFrame.droplevel SA01\ + -i pandas.DataFrame.dtypes SA01\ + -i pandas.DataFrame.ffill SA01\ + -i pandas.DataFrame.first_valid_index SA01\ + -i pandas.DataFrame.get SA01\ + -i pandas.DataFrame.hist RT03\ + -i pandas.DataFrame.infer_objects RT03\ + -i pandas.DataFrame.keys SA01\ + -i pandas.DataFrame.kurt RT03,SA01\ + -i pandas.DataFrame.kurtosis 
RT03,SA01\ + -i pandas.DataFrame.last_valid_index SA01\ + -i pandas.DataFrame.mask RT03\ + -i pandas.DataFrame.max RT03\ + -i pandas.DataFrame.mean RT03,SA01\ + -i pandas.DataFrame.median RT03,SA01\ + -i pandas.DataFrame.min RT03\ + -i pandas.DataFrame.pad PR01,SA01\ + -i pandas.DataFrame.plot PR02,SA01\ + -i pandas.DataFrame.pop SA01\ + -i pandas.DataFrame.prod RT03\ + -i pandas.DataFrame.product RT03\ + -i pandas.DataFrame.reorder_levels SA01\ + -i pandas.DataFrame.sem PR01,RT03,SA01\ + -i pandas.DataFrame.skew RT03,SA01\ + -i pandas.DataFrame.sparse PR01,SA01\ + -i pandas.DataFrame.sparse.density SA01\ + -i pandas.DataFrame.sparse.from_spmatrix SA01\ + -i pandas.DataFrame.sparse.to_coo SA01\ + -i pandas.DataFrame.sparse.to_dense SA01\ + -i pandas.DataFrame.std PR01,RT03,SA01\ + -i pandas.DataFrame.sum RT03\ + -i pandas.DataFrame.swapaxes PR01,SA01\ + -i pandas.DataFrame.swaplevel SA01\ + -i pandas.DataFrame.to_feather SA01\ + -i pandas.DataFrame.to_markdown SA01\ + -i pandas.DataFrame.to_parquet RT03\ + -i pandas.DataFrame.to_period SA01\ + -i pandas.DataFrame.to_timestamp SA01\ + -i pandas.DataFrame.tz_convert SA01\ + -i pandas.DataFrame.tz_localize SA01\ + -i pandas.DataFrame.unstack RT03\ + -i pandas.DataFrame.value_counts RT03\ + -i pandas.DataFrame.var PR01,RT03,SA01\ + -i pandas.DataFrame.where RT03\ + -i pandas.DatetimeIndex.ceil SA01\ + -i pandas.DatetimeIndex.date SA01\ + -i pandas.DatetimeIndex.day SA01\ + -i pandas.DatetimeIndex.day_name SA01\ + -i pandas.DatetimeIndex.day_of_year SA01\ + -i pandas.DatetimeIndex.dayofyear SA01\ + -i pandas.DatetimeIndex.floor SA01\ + -i pandas.DatetimeIndex.freqstr SA01\ + -i pandas.DatetimeIndex.hour SA01\ + -i pandas.DatetimeIndex.indexer_at_time PR01,RT03\ + -i pandas.DatetimeIndex.indexer_between_time RT03\ + -i pandas.DatetimeIndex.inferred_freq SA01\ + -i pandas.DatetimeIndex.is_leap_year SA01\ + -i pandas.DatetimeIndex.microsecond SA01\ + -i pandas.DatetimeIndex.minute SA01\ + -i pandas.DatetimeIndex.month 
SA01\ + -i pandas.DatetimeIndex.month_name SA01\ + -i pandas.DatetimeIndex.nanosecond SA01\ + -i pandas.DatetimeIndex.quarter SA01\ + -i pandas.DatetimeIndex.round SA01\ + -i pandas.DatetimeIndex.second SA01\ + -i pandas.DatetimeIndex.snap PR01,RT03,SA01\ + -i pandas.DatetimeIndex.std PR01,RT03\ + -i pandas.DatetimeIndex.time SA01\ + -i pandas.DatetimeIndex.timetz SA01\ + -i pandas.DatetimeIndex.to_period RT03\ + -i pandas.DatetimeIndex.to_pydatetime RT03,SA01\ + -i pandas.DatetimeIndex.tz SA01\ + -i pandas.DatetimeIndex.tz_convert RT03\ + -i pandas.DatetimeIndex.year SA01\ + -i pandas.DatetimeTZDtype SA01\ + -i pandas.DatetimeTZDtype.tz SA01\ + -i pandas.DatetimeTZDtype.unit SA01\ + -i pandas.ExcelFile PR01,SA01\ + -i pandas.ExcelFile.parse PR01,SA01\ + -i pandas.ExcelWriter SA01\ + -i pandas.Float32Dtype SA01\ + -i pandas.Float64Dtype SA01\ + -i pandas.Grouper PR02,SA01\ + -i pandas.HDFStore.append PR01,SA01\ + -i pandas.HDFStore.get SA01\ + -i pandas.HDFStore.groups SA01\ + -i pandas.HDFStore.info RT03,SA01\ + -i pandas.HDFStore.keys SA01\ + -i pandas.HDFStore.put PR01,SA01\ + -i pandas.HDFStore.select SA01\ + -i pandas.HDFStore.walk SA01\ + -i pandas.Index PR07\ + -i pandas.Index.T SA01\ + -i pandas.Index.append PR07,RT03,SA01\ + -i pandas.Index.astype SA01\ + -i pandas.Index.copy PR07,SA01\ + -i pandas.Index.difference PR07,RT03,SA01\ + -i pandas.Index.drop PR07,SA01\ + -i pandas.Index.drop_duplicates RT03\ + -i pandas.Index.droplevel RT03,SA01\ + -i pandas.Index.dropna RT03,SA01\ + -i pandas.Index.dtype SA01\ + -i pandas.Index.duplicated RT03\ + -i pandas.Index.empty GL08\ + -i pandas.Index.equals SA01\ + -i pandas.Index.fillna RT03\ + -i pandas.Index.get_indexer PR07,SA01\ + -i pandas.Index.get_indexer_for PR01,SA01\ + -i pandas.Index.get_indexer_non_unique PR07,SA01\ + -i pandas.Index.get_loc PR07,RT03,SA01\ + -i pandas.Index.get_slice_bound PR07\ + -i pandas.Index.hasnans SA01\ + -i pandas.Index.identical PR01,SA01\ + -i pandas.Index.inferred_type SA01\ + 
-i pandas.Index.insert PR07,RT03,SA01\ + -i pandas.Index.intersection PR07,RT03,SA01\ + -i pandas.Index.item SA01\ + -i pandas.Index.join PR07,RT03,SA01\ + -i pandas.Index.map SA01\ + -i pandas.Index.memory_usage RT03\ + -i pandas.Index.name SA01\ + -i pandas.Index.names GL08\ + -i pandas.Index.nbytes SA01\ + -i pandas.Index.ndim SA01\ + -i pandas.Index.nunique RT03\ + -i pandas.Index.putmask PR01,RT03\ + -i pandas.Index.ravel PR01,RT03\ + -i pandas.Index.reindex PR07\ + -i pandas.Index.shape SA01\ + -i pandas.Index.size SA01\ + -i pandas.Index.slice_indexer PR07,RT03,SA01\ + -i pandas.Index.slice_locs RT03\ + -i pandas.Index.str PR01,SA01\ + -i pandas.Index.symmetric_difference PR07,RT03,SA01\ + -i pandas.Index.take PR01,PR07\ + -i pandas.Index.to_list RT03\ + -i pandas.Index.union PR07,RT03,SA01\ + -i pandas.Index.unique RT03\ + -i pandas.Index.value_counts RT03\ + -i pandas.Index.view GL08\ + -i pandas.Int16Dtype SA01\ + -i pandas.Int32Dtype SA01\ + -i pandas.Int64Dtype SA01\ + -i pandas.Int8Dtype SA01\ + -i pandas.Interval PR02\ + -i pandas.Interval.closed SA01\ + -i pandas.Interval.left SA01\ + -i pandas.Interval.mid SA01\ + -i pandas.Interval.right SA01\ + -i pandas.IntervalDtype PR01,SA01\ + -i pandas.IntervalDtype.subtype SA01\ + -i pandas.IntervalIndex.closed SA01\ + -i pandas.IntervalIndex.contains RT03\ + -i pandas.IntervalIndex.get_indexer PR07,SA01\ + -i pandas.IntervalIndex.get_loc PR07,RT03,SA01\ + -i pandas.IntervalIndex.is_non_overlapping_monotonic SA01\ + -i pandas.IntervalIndex.left GL08\ + -i pandas.IntervalIndex.length GL08\ + -i pandas.IntervalIndex.mid GL08\ + -i pandas.IntervalIndex.right GL08\ + -i pandas.IntervalIndex.set_closed RT03,SA01\ + -i pandas.IntervalIndex.to_tuples RT03,SA01\ + -i pandas.MultiIndex PR01\ + -i pandas.MultiIndex.append PR07,SA01\ + -i pandas.MultiIndex.copy PR07,RT03,SA01\ + -i pandas.MultiIndex.drop PR07,RT03,SA01\ + -i pandas.MultiIndex.droplevel RT03,SA01\ + -i pandas.MultiIndex.dtypes SA01\ + -i 
pandas.MultiIndex.get_indexer PR07,SA01\ + -i pandas.MultiIndex.get_level_values SA01\ + -i pandas.MultiIndex.get_loc PR07\ + -i pandas.MultiIndex.get_loc_level PR07\ + -i pandas.MultiIndex.levels SA01\ + -i pandas.MultiIndex.levshape SA01\ + -i pandas.MultiIndex.names SA01\ + -i pandas.MultiIndex.nlevels SA01\ + -i pandas.MultiIndex.remove_unused_levels RT03,SA01\ + -i pandas.MultiIndex.reorder_levels RT03,SA01\ + -i pandas.MultiIndex.set_codes SA01\ + -i pandas.MultiIndex.set_levels RT03,SA01\ + -i pandas.MultiIndex.sortlevel PR07,SA01\ + -i pandas.MultiIndex.to_frame RT03\ + -i pandas.MultiIndex.truncate SA01\ + -i pandas.NA SA01\ + -i pandas.NaT SA01\ + -i pandas.NamedAgg SA01\ + -i pandas.Period SA01\ + -i pandas.Period.asfreq SA01\ + -i pandas.Period.freq GL08\ + -i pandas.Period.freqstr SA01\ + -i pandas.Period.is_leap_year SA01\ + -i pandas.Period.month SA01\ + -i pandas.Period.now SA01\ + -i pandas.Period.ordinal GL08\ + -i pandas.Period.quarter SA01\ + -i pandas.Period.strftime PR01,SA01\ + -i pandas.Period.to_timestamp SA01\ + -i pandas.Period.year SA01\ + -i pandas.PeriodDtype SA01\ + -i pandas.PeriodDtype.freq SA01\ + -i pandas.PeriodIndex.day SA01\ + -i pandas.PeriodIndex.day_of_week SA01\ + -i pandas.PeriodIndex.day_of_year SA01\ + -i pandas.PeriodIndex.dayofweek SA01\ + -i pandas.PeriodIndex.dayofyear SA01\ + -i pandas.PeriodIndex.days_in_month SA01\ + -i pandas.PeriodIndex.daysinmonth SA01\ + -i pandas.PeriodIndex.freqstr SA01\ + -i pandas.PeriodIndex.from_fields PR07,SA01\ + -i pandas.PeriodIndex.from_ordinals SA01\ + -i pandas.PeriodIndex.hour SA01\ + -i pandas.PeriodIndex.is_leap_year SA01\ + -i pandas.PeriodIndex.minute SA01\ + -i pandas.PeriodIndex.month SA01\ + -i pandas.PeriodIndex.quarter SA01\ + -i pandas.PeriodIndex.qyear GL08\ + -i pandas.PeriodIndex.second SA01\ + -i pandas.PeriodIndex.to_timestamp RT03,SA01\ + -i pandas.PeriodIndex.week SA01\ + -i pandas.PeriodIndex.weekday SA01\ + -i pandas.PeriodIndex.weekofyear SA01\ + -i 
pandas.PeriodIndex.year SA01\ + -i pandas.RangeIndex PR07\ + -i pandas.RangeIndex.from_range PR01,SA01\ + -i pandas.RangeIndex.start SA01\ + -i pandas.RangeIndex.step SA01\ + -i pandas.RangeIndex.stop SA01\ + -i pandas.Series SA01\ + -i pandas.Series.T SA01\ + -i pandas.Series.__iter__ RT03,SA01\ + -i pandas.Series.add PR07\ + -i pandas.Series.at_time PR01\ + -i pandas.Series.backfill PR01,SA01\ + -i pandas.Series.bfill SA01\ + -i pandas.Series.case_when RT03\ + -i pandas.Series.cat PR07,SA01\ + -i pandas.Series.cat.add_categories PR01,PR02\ + -i pandas.Series.cat.as_ordered PR01\ + -i pandas.Series.cat.as_unordered PR01\ + -i pandas.Series.cat.codes SA01\ + -i pandas.Series.cat.ordered SA01\ + -i pandas.Series.cat.remove_categories PR01,PR02\ + -i pandas.Series.cat.remove_unused_categories PR01\ + -i pandas.Series.cat.rename_categories PR01,PR02\ + -i pandas.Series.cat.reorder_categories PR01,PR02\ + -i pandas.Series.cat.set_categories PR01,PR02\ + -i pandas.Series.copy SA01\ + -i pandas.Series.div PR07\ + -i pandas.Series.droplevel SA01\ + -i pandas.Series.dt.as_unit PR01,PR02\ + -i pandas.Series.dt.ceil PR01,PR02,SA01\ + -i pandas.Series.dt.components SA01\ + -i pandas.Series.dt.date SA01\ + -i pandas.Series.dt.day SA01\ + -i pandas.Series.dt.day_name PR01,PR02,SA01\ + -i pandas.Series.dt.day_of_year SA01\ + -i pandas.Series.dt.dayofyear SA01\ + -i pandas.Series.dt.days SA01\ + -i pandas.Series.dt.days_in_month SA01\ + -i pandas.Series.dt.daysinmonth SA01\ + -i pandas.Series.dt.floor PR01,PR02,SA01\ + -i pandas.Series.dt.freq GL08\ + -i pandas.Series.dt.hour SA01\ + -i pandas.Series.dt.is_leap_year SA01\ + -i pandas.Series.dt.microsecond SA01\ + -i pandas.Series.dt.microseconds SA01\ + -i pandas.Series.dt.minute SA01\ + -i pandas.Series.dt.month SA01\ + -i pandas.Series.dt.month_name PR01,PR02,SA01\ + -i pandas.Series.dt.nanosecond SA01\ + -i pandas.Series.dt.nanoseconds SA01\ + -i pandas.Series.dt.normalize PR01\ + -i pandas.Series.dt.quarter SA01\ + -i 
pandas.Series.dt.qyear GL08\ + -i pandas.Series.dt.round PR01,PR02,SA01\ + -i pandas.Series.dt.second SA01\ + -i pandas.Series.dt.seconds SA01\ + -i pandas.Series.dt.strftime PR01,PR02\ + -i pandas.Series.dt.time SA01\ + -i pandas.Series.dt.timetz SA01\ + -i pandas.Series.dt.to_period PR01,PR02,RT03\ + -i pandas.Series.dt.total_seconds PR01\ + -i pandas.Series.dt.tz SA01\ + -i pandas.Series.dt.tz_convert PR01,PR02,RT03\ + -i pandas.Series.dt.tz_localize PR01,PR02\ + -i pandas.Series.dt.unit GL08\ + -i pandas.Series.dt.year SA01\ + -i pandas.Series.dtype SA01\ + -i pandas.Series.dtypes SA01\ + -i pandas.Series.empty GL08\ + -i pandas.Series.eq PR07,SA01\ + -i pandas.Series.ffill SA01\ + -i pandas.Series.first_valid_index SA01\ + -i pandas.Series.floordiv PR07\ + -i pandas.Series.ge PR07,SA01\ + -i pandas.Series.get SA01\ + -i pandas.Series.gt PR07,SA01\ + -i pandas.Series.hasnans SA01\ + -i pandas.Series.infer_objects RT03\ + -i pandas.Series.is_monotonic_decreasing SA01\ + -i pandas.Series.is_monotonic_increasing SA01\ + -i pandas.Series.is_unique SA01\ + -i pandas.Series.item SA01\ + -i pandas.Series.keys SA01\ + -i pandas.Series.kurt RT03,SA01\ + -i pandas.Series.kurtosis RT03,SA01\ + -i pandas.Series.last_valid_index SA01\ + -i pandas.Series.le PR07,SA01\ + -i pandas.Series.list.__getitem__ SA01\ + -i pandas.Series.list.flatten SA01\ + -i pandas.Series.list.len SA01\ + -i pandas.Series.lt PR07,SA01\ + -i pandas.Series.mask RT03\ + -i pandas.Series.max RT03\ + -i pandas.Series.mean RT03,SA01\ + -i pandas.Series.median RT03,SA01\ + -i pandas.Series.min RT03\ + -i pandas.Series.mod PR07\ + -i pandas.Series.mode SA01\ + -i pandas.Series.mul PR07\ + -i pandas.Series.nbytes SA01\ + -i pandas.Series.ndim SA01\ + -i pandas.Series.ne PR07,SA01\ + -i pandas.Series.nunique RT03\ + -i pandas.Series.pad PR01,SA01\ + -i pandas.Series.plot PR02,SA01\ + -i pandas.Series.pop RT03,SA01\ + -i pandas.Series.pow PR07\ + -i pandas.Series.prod RT03\ + -i pandas.Series.product RT03\ + 
-i pandas.Series.radd PR07\ + -i pandas.Series.rdiv PR07\ + -i pandas.Series.reorder_levels RT03,SA01\ + -i pandas.Series.rfloordiv PR07\ + -i pandas.Series.rmod PR07\ + -i pandas.Series.rmul PR07\ + -i pandas.Series.rpow PR07\ + -i pandas.Series.rsub PR07\ + -i pandas.Series.rtruediv PR07\ + -i pandas.Series.sem PR01,RT03,SA01\ + -i pandas.Series.shape SA01\ + -i pandas.Series.size SA01\ + -i pandas.Series.skew RT03,SA01\ + -i pandas.Series.sparse PR01,SA01\ + -i pandas.Series.sparse.density SA01\ + -i pandas.Series.sparse.fill_value SA01\ + -i pandas.Series.sparse.from_coo PR07,SA01\ + -i pandas.Series.sparse.npoints SA01\ + -i pandas.Series.sparse.sp_values SA01\ + -i pandas.Series.sparse.to_coo PR07,RT03,SA01\ + -i pandas.Series.std PR01,RT03,SA01\ + -i pandas.Series.str PR01,SA01\ + -i pandas.Series.str.capitalize RT03\ + -i pandas.Series.str.casefold RT03\ + -i pandas.Series.str.center RT03,SA01\ + -i pandas.Series.str.decode PR07,RT03,SA01\ + -i pandas.Series.str.encode PR07,RT03,SA01\ + -i pandas.Series.str.find RT03\ + -i pandas.Series.str.fullmatch RT03\ + -i pandas.Series.str.get RT03,SA01\ + -i pandas.Series.str.index RT03\ + -i pandas.Series.str.ljust RT03,SA01\ + -i pandas.Series.str.lower RT03\ + -i pandas.Series.str.lstrip RT03\ + -i pandas.Series.str.match RT03\ + -i pandas.Series.str.normalize RT03,SA01\ + -i pandas.Series.str.partition RT03\ + -i pandas.Series.str.repeat SA01\ + -i pandas.Series.str.replace SA01\ + -i pandas.Series.str.rfind RT03\ + -i pandas.Series.str.rindex RT03\ + -i pandas.Series.str.rjust RT03,SA01\ + -i pandas.Series.str.rpartition RT03\ + -i pandas.Series.str.rstrip RT03\ + -i pandas.Series.str.strip RT03\ + -i pandas.Series.str.swapcase RT03\ + -i pandas.Series.str.title RT03\ + -i pandas.Series.str.translate RT03,SA01\ + -i pandas.Series.str.upper RT03\ + -i pandas.Series.str.wrap RT03,SA01\ + -i pandas.Series.str.zfill RT03\ + -i pandas.Series.struct.dtypes SA01\ + -i pandas.Series.sub PR07\ + -i pandas.Series.sum 
RT03\ + -i pandas.Series.swaplevel SA01\ + -i pandas.Series.to_dict SA01\ + -i pandas.Series.to_frame SA01\ + -i pandas.Series.to_list RT03\ + -i pandas.Series.to_markdown SA01\ + -i pandas.Series.to_period SA01\ + -i pandas.Series.to_string SA01\ + -i pandas.Series.to_timestamp RT03,SA01\ + -i pandas.Series.truediv PR07\ + -i pandas.Series.tz_convert SA01\ + -i pandas.Series.tz_localize SA01\ + -i pandas.Series.unstack SA01\ + -i pandas.Series.update PR07,SA01\ + -i pandas.Series.value_counts RT03\ + -i pandas.Series.var PR01,RT03,SA01\ + -i pandas.Series.where RT03\ + -i pandas.SparseDtype SA01\ + -i pandas.Timedelta PR07,SA01\ + -i pandas.Timedelta.as_unit SA01\ + -i pandas.Timedelta.asm8 SA01\ + -i pandas.Timedelta.ceil SA01\ + -i pandas.Timedelta.components SA01\ + -i pandas.Timedelta.days SA01\ + -i pandas.Timedelta.floor SA01\ + -i pandas.Timedelta.max PR02,PR07,SA01\ + -i pandas.Timedelta.min PR02,PR07,SA01\ + -i pandas.Timedelta.resolution PR02,PR07,SA01\ + -i pandas.Timedelta.round SA01\ + -i pandas.Timedelta.to_numpy PR01\ + -i pandas.Timedelta.to_timedelta64 SA01\ + -i pandas.Timedelta.total_seconds SA01\ + -i pandas.Timedelta.view SA01\ + -i pandas.TimedeltaIndex PR01\ + -i pandas.TimedeltaIndex.as_unit RT03,SA01\ + -i pandas.TimedeltaIndex.ceil SA01\ + -i pandas.TimedeltaIndex.components SA01\ + -i pandas.TimedeltaIndex.days SA01\ + -i pandas.TimedeltaIndex.floor SA01\ + -i pandas.TimedeltaIndex.inferred_freq SA01\ + -i pandas.TimedeltaIndex.microseconds SA01\ + -i pandas.TimedeltaIndex.nanoseconds SA01\ + -i pandas.TimedeltaIndex.round SA01\ + -i pandas.TimedeltaIndex.seconds SA01\ + -i pandas.TimedeltaIndex.to_pytimedelta RT03,SA01\ + -i pandas.Timestamp PR07,SA01\ + -i pandas.Timestamp.as_unit SA01\ + -i pandas.Timestamp.asm8 SA01\ + -i pandas.Timestamp.astimezone SA01\ + -i pandas.Timestamp.ceil SA01\ + -i pandas.Timestamp.combine PR01,SA01\ + -i pandas.Timestamp.ctime SA01\ + -i pandas.Timestamp.date SA01\ + -i pandas.Timestamp.day GL08\ + -i 
pandas.Timestamp.day_name SA01\ + -i pandas.Timestamp.day_of_week SA01\ + -i pandas.Timestamp.day_of_year SA01\ + -i pandas.Timestamp.dayofweek SA01\ + -i pandas.Timestamp.dayofyear SA01\ + -i pandas.Timestamp.days_in_month SA01\ + -i pandas.Timestamp.daysinmonth SA01\ + -i pandas.Timestamp.dst SA01\ + -i pandas.Timestamp.floor SA01\ + -i pandas.Timestamp.fold GL08\ + -i pandas.Timestamp.fromordinal SA01\ + -i pandas.Timestamp.fromtimestamp PR01,SA01\ + -i pandas.Timestamp.hour GL08\ + -i pandas.Timestamp.is_leap_year SA01\ + -i pandas.Timestamp.isocalendar SA01\ + -i pandas.Timestamp.isoformat SA01\ + -i pandas.Timestamp.isoweekday SA01\ + -i pandas.Timestamp.max PR02,PR07,SA01\ + -i pandas.Timestamp.microsecond GL08\ + -i pandas.Timestamp.min PR02,PR07,SA01\ + -i pandas.Timestamp.minute GL08\ + -i pandas.Timestamp.month GL08\ + -i pandas.Timestamp.month_name SA01\ + -i pandas.Timestamp.nanosecond GL08\ + -i pandas.Timestamp.normalize SA01\ + -i pandas.Timestamp.now SA01\ + -i pandas.Timestamp.quarter SA01\ + -i pandas.Timestamp.replace PR07,SA01\ + -i pandas.Timestamp.resolution PR02,PR07,SA01\ + -i pandas.Timestamp.round SA01\ + -i pandas.Timestamp.second GL08\ + -i pandas.Timestamp.strftime SA01\ + -i pandas.Timestamp.strptime PR01,SA01\ + -i pandas.Timestamp.time SA01\ + -i pandas.Timestamp.timestamp SA01\ + -i pandas.Timestamp.timetuple SA01\ + -i pandas.Timestamp.timetz SA01\ + -i pandas.Timestamp.to_datetime64 SA01\ + -i pandas.Timestamp.to_julian_date SA01\ + -i pandas.Timestamp.to_numpy PR01\ + -i pandas.Timestamp.to_period PR01,SA01\ + -i pandas.Timestamp.to_pydatetime PR01,SA01\ + -i pandas.Timestamp.today SA01\ + -i pandas.Timestamp.toordinal SA01\ + -i pandas.Timestamp.tz SA01\ + -i pandas.Timestamp.tz_convert SA01\ + -i pandas.Timestamp.tz_localize SA01\ + -i pandas.Timestamp.tzinfo GL08\ + -i pandas.Timestamp.tzname SA01\ + -i pandas.Timestamp.unit SA01\ + -i pandas.Timestamp.utcfromtimestamp PR01,SA01\ + -i pandas.Timestamp.utcnow SA01\ + -i 
pandas.Timestamp.utcoffset SA01\ + -i pandas.Timestamp.utctimetuple SA01\ + -i pandas.Timestamp.value GL08\ + -i pandas.Timestamp.week SA01\ + -i pandas.Timestamp.weekday SA01\ + -i pandas.Timestamp.weekofyear SA01\ + -i pandas.Timestamp.year GL08\ + -i pandas.UInt16Dtype SA01\ + -i pandas.UInt32Dtype SA01\ + -i pandas.UInt64Dtype SA01\ + -i pandas.UInt8Dtype SA01\ + -i pandas.api.extensions.ExtensionArray SA01\ + -i pandas.api.extensions.ExtensionArray._accumulate RT03,SA01\ + -i pandas.api.extensions.ExtensionArray._concat_same_type PR07,SA01\ + -i pandas.api.extensions.ExtensionArray._formatter SA01\ + -i pandas.api.extensions.ExtensionArray._from_sequence SA01\ + -i pandas.api.extensions.ExtensionArray._from_sequence_of_strings SA01\ + -i pandas.api.extensions.ExtensionArray._hash_pandas_object RT03,SA01\ + -i pandas.api.extensions.ExtensionArray._pad_or_backfill PR01,RT03,SA01\ + -i pandas.api.extensions.ExtensionArray._reduce RT03,SA01\ + -i pandas.api.extensions.ExtensionArray._values_for_factorize SA01\ + -i pandas.api.extensions.ExtensionArray.astype SA01\ + -i pandas.api.extensions.ExtensionArray.copy RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.dropna RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.dtype SA01\ + -i pandas.api.extensions.ExtensionArray.duplicated RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.equals SA01\ + -i pandas.api.extensions.ExtensionArray.fillna SA01\ + -i pandas.api.extensions.ExtensionArray.insert PR07,RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.interpolate PR01,SA01\ + -i pandas.api.extensions.ExtensionArray.isin PR07,RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.isna SA01\ + -i pandas.api.extensions.ExtensionArray.nbytes SA01\ + -i pandas.api.extensions.ExtensionArray.ndim SA01\ + -i pandas.api.extensions.ExtensionArray.ravel RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.shape SA01\ + -i pandas.api.extensions.ExtensionArray.shift SA01\ + -i pandas.api.extensions.ExtensionArray.take 
RT03\ + -i pandas.api.extensions.ExtensionArray.tolist RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.unique RT03,SA01\ + -i pandas.api.extensions.ExtensionArray.view SA01\ + -i pandas.api.extensions.register_extension_dtype SA01\ + -i pandas.api.indexers.BaseIndexer PR01,SA01\ + -i pandas.api.indexers.FixedForwardWindowIndexer PR01,SA01\ + -i pandas.api.indexers.VariableOffsetWindowIndexer PR01,SA01\ + -i pandas.api.interchange.from_dataframe RT03,SA01\ + -i pandas.api.types.infer_dtype PR07,SA01\ + -i pandas.api.types.is_any_real_numeric_dtype SA01\ + -i pandas.api.types.is_bool PR01,SA01\ + -i pandas.api.types.is_bool_dtype SA01\ + -i pandas.api.types.is_categorical_dtype SA01\ + -i pandas.api.types.is_complex PR01,SA01\ + -i pandas.api.types.is_complex_dtype SA01\ + -i pandas.api.types.is_datetime64_any_dtype SA01\ + -i pandas.api.types.is_datetime64_dtype SA01\ + -i pandas.api.types.is_datetime64_ns_dtype SA01\ + -i pandas.api.types.is_datetime64tz_dtype SA01\ + -i pandas.api.types.is_dict_like PR07,SA01\ + -i pandas.api.types.is_extension_array_dtype SA01\ + -i pandas.api.types.is_file_like PR07,SA01\ + -i pandas.api.types.is_float PR01,SA01\ + -i pandas.api.types.is_float_dtype SA01\ + -i pandas.api.types.is_hashable PR01,RT03,SA01\ + -i pandas.api.types.is_int64_dtype SA01\ + -i pandas.api.types.is_integer PR01,SA01\ + -i pandas.api.types.is_integer_dtype SA01\ + -i pandas.api.types.is_interval_dtype SA01\ + -i pandas.api.types.is_iterator PR07,SA01\ + -i pandas.api.types.is_list_like SA01\ + -i pandas.api.types.is_named_tuple PR07,SA01\ + -i pandas.api.types.is_numeric_dtype SA01\ + -i pandas.api.types.is_object_dtype SA01\ + -i pandas.api.types.is_period_dtype SA01\ + -i pandas.api.types.is_re PR07,SA01\ + -i pandas.api.types.is_re_compilable PR07,SA01\ + -i pandas.api.types.is_scalar SA01\ + -i pandas.api.types.is_signed_integer_dtype SA01\ + -i pandas.api.types.is_sparse SA01\ + -i pandas.api.types.is_string_dtype SA01\ + -i 
pandas.api.types.is_timedelta64_dtype SA01\ + -i pandas.api.types.is_timedelta64_ns_dtype SA01\ + -i pandas.api.types.is_unsigned_integer_dtype SA01\ + -i pandas.api.types.pandas_dtype PR07,RT03,SA01\ + -i pandas.api.types.union_categoricals RT03,SA01\ + -i pandas.arrays.ArrowExtensionArray PR07,SA01\ + -i pandas.arrays.BooleanArray SA01\ + -i pandas.arrays.DatetimeArray SA01\ + -i pandas.arrays.FloatingArray SA01\ + -i pandas.arrays.IntegerArray SA01\ + -i pandas.arrays.IntervalArray.closed SA01\ + -i pandas.arrays.IntervalArray.contains RT03\ + -i pandas.arrays.IntervalArray.is_non_overlapping_monotonic SA01\ + -i pandas.arrays.IntervalArray.left SA01\ + -i pandas.arrays.IntervalArray.length SA01\ + -i pandas.arrays.IntervalArray.mid SA01\ + -i pandas.arrays.IntervalArray.right SA01\ + -i pandas.arrays.IntervalArray.set_closed RT03,SA01\ + -i pandas.arrays.IntervalArray.to_tuples RT03,SA01\ + -i pandas.arrays.NumpyExtensionArray SA01\ + -i pandas.arrays.SparseArray PR07,SA01\ + -i pandas.arrays.TimedeltaArray PR07,SA01\ + -i pandas.bdate_range RT03,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.__iter__ RT03,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.agg RT03\ + -i pandas.core.groupby.DataFrameGroupBy.aggregate RT03\ + -i pandas.core.groupby.DataFrameGroupBy.apply RT03\ + -i pandas.core.groupby.DataFrameGroupBy.boxplot PR07,RT03,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.cummax RT03\ + -i pandas.core.groupby.DataFrameGroupBy.cummin RT03\ + -i pandas.core.groupby.DataFrameGroupBy.cumprod RT03\ + -i pandas.core.groupby.DataFrameGroupBy.cumsum RT03\ + -i pandas.core.groupby.DataFrameGroupBy.filter RT03,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.get_group RT03,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.groups SA01\ + -i pandas.core.groupby.DataFrameGroupBy.hist RT03\ + -i pandas.core.groupby.DataFrameGroupBy.indices SA01\ + -i pandas.core.groupby.DataFrameGroupBy.max SA01\ + -i pandas.core.groupby.DataFrameGroupBy.mean RT03\ + -i 
pandas.core.groupby.DataFrameGroupBy.median SA01\ + -i pandas.core.groupby.DataFrameGroupBy.min SA01\ + -i pandas.core.groupby.DataFrameGroupBy.nth PR02\ + -i pandas.core.groupby.DataFrameGroupBy.nunique RT03,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.ohlc SA01\ + -i pandas.core.groupby.DataFrameGroupBy.plot PR02,SA01\ + -i pandas.core.groupby.DataFrameGroupBy.prod SA01\ + -i pandas.core.groupby.DataFrameGroupBy.rank RT03\ + -i pandas.core.groupby.DataFrameGroupBy.resample RT03\ + -i pandas.core.groupby.DataFrameGroupBy.sem SA01\ + -i pandas.core.groupby.DataFrameGroupBy.skew RT03\ + -i pandas.core.groupby.DataFrameGroupBy.sum SA01\ + -i pandas.core.groupby.DataFrameGroupBy.transform RT03\ + -i pandas.core.groupby.SeriesGroupBy.__iter__ RT03,SA01\ + -i pandas.core.groupby.SeriesGroupBy.agg RT03\ + -i pandas.core.groupby.SeriesGroupBy.aggregate RT03\ + -i pandas.core.groupby.SeriesGroupBy.apply RT03\ + -i pandas.core.groupby.SeriesGroupBy.cummax RT03\ + -i pandas.core.groupby.SeriesGroupBy.cummin RT03\ + -i pandas.core.groupby.SeriesGroupBy.cumprod RT03\ + -i pandas.core.groupby.SeriesGroupBy.cumsum RT03\ + -i pandas.core.groupby.SeriesGroupBy.filter PR01,RT03,SA01\ + -i pandas.core.groupby.SeriesGroupBy.get_group RT03,SA01\ + -i pandas.core.groupby.SeriesGroupBy.groups SA01\ + -i pandas.core.groupby.SeriesGroupBy.indices SA01\ + -i pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing SA01\ + -i pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing SA01\ + -i pandas.core.groupby.SeriesGroupBy.max SA01\ + -i pandas.core.groupby.SeriesGroupBy.mean RT03\ + -i pandas.core.groupby.SeriesGroupBy.median SA01\ + -i pandas.core.groupby.SeriesGroupBy.min SA01\ + -i pandas.core.groupby.SeriesGroupBy.nth PR02\ + -i pandas.core.groupby.SeriesGroupBy.ohlc SA01\ + -i pandas.core.groupby.SeriesGroupBy.plot PR02,SA01\ + -i pandas.core.groupby.SeriesGroupBy.prod SA01\ + -i pandas.core.groupby.SeriesGroupBy.rank RT03\ + -i pandas.core.groupby.SeriesGroupBy.resample RT03\ 
+ -i pandas.core.groupby.SeriesGroupBy.sem SA01\ + -i pandas.core.groupby.SeriesGroupBy.skew RT03\ + -i pandas.core.groupby.SeriesGroupBy.sum SA01\ + -i pandas.core.groupby.SeriesGroupBy.transform RT03\ + -i pandas.core.resample.Resampler.__iter__ RT03,SA01\ + -i pandas.core.resample.Resampler.ffill RT03\ + -i pandas.core.resample.Resampler.get_group RT03,SA01\ + -i pandas.core.resample.Resampler.groups SA01\ + -i pandas.core.resample.Resampler.indices SA01\ + -i pandas.core.resample.Resampler.max PR01,RT03,SA01\ + -i pandas.core.resample.Resampler.mean SA01\ + -i pandas.core.resample.Resampler.median SA01\ + -i pandas.core.resample.Resampler.min PR01,RT03,SA01\ + -i pandas.core.resample.Resampler.ohlc SA01\ + -i pandas.core.resample.Resampler.prod SA01\ + -i pandas.core.resample.Resampler.quantile PR01,PR07\ + -i pandas.core.resample.Resampler.sem SA01\ + -i pandas.core.resample.Resampler.std SA01\ + -i pandas.core.resample.Resampler.sum SA01\ + -i pandas.core.resample.Resampler.transform PR01,RT03,SA01\ + -i pandas.core.resample.Resampler.var SA01\ + -i pandas.core.window.expanding.Expanding.corr PR01\ + -i pandas.core.window.expanding.Expanding.count PR01\ + -i pandas.core.window.rolling.Rolling.max PR01\ + -i pandas.core.window.rolling.Window.std PR01\ + -i pandas.core.window.rolling.Window.var PR01\ + -i pandas.date_range RT03\ + -i pandas.describe_option SA01\ + -i pandas.errors.AbstractMethodError PR01,SA01\ + -i pandas.errors.AttributeConflictWarning SA01\ + -i pandas.errors.CSSWarning SA01\ + -i pandas.errors.CategoricalConversionWarning SA01\ + -i pandas.errors.ChainedAssignmentError SA01\ + -i pandas.errors.ClosedFileError SA01\ + -i pandas.errors.DataError SA01\ + -i pandas.errors.DuplicateLabelError SA01\ + -i pandas.errors.EmptyDataError SA01\ + -i pandas.errors.IntCastingNaNError SA01\ + -i pandas.errors.InvalidIndexError SA01\ + -i pandas.errors.InvalidVersion SA01\ + -i pandas.errors.MergeError SA01\ + -i pandas.errors.NullFrequencyError SA01\ + -i 
pandas.errors.NumExprClobberingError SA01\ + -i pandas.errors.NumbaUtilError SA01\ + -i pandas.errors.OptionError SA01\ + -i pandas.errors.OutOfBoundsDatetime SA01\ + -i pandas.errors.OutOfBoundsTimedelta SA01\ + -i pandas.errors.PerformanceWarning SA01\ + -i pandas.errors.PossibleDataLossError SA01\ + -i pandas.errors.PossiblePrecisionLoss SA01\ + -i pandas.errors.SpecificationError SA01\ + -i pandas.errors.UndefinedVariableError PR01,SA01\ + -i pandas.errors.UnsortedIndexError SA01\ + -i pandas.errors.UnsupportedFunctionCall SA01\ + -i pandas.errors.ValueLabelTypeMismatch SA01\ + -i pandas.get_option SA01\ + -i pandas.infer_freq SA01\ + -i pandas.interval_range RT03\ + -i pandas.io.formats.style.Styler.apply RT03\ + -i pandas.io.formats.style.Styler.apply_index RT03\ + -i pandas.io.formats.style.Styler.background_gradient RT03\ + -i pandas.io.formats.style.Styler.bar RT03,SA01\ + -i pandas.io.formats.style.Styler.clear SA01\ + -i pandas.io.formats.style.Styler.concat RT03,SA01\ + -i pandas.io.formats.style.Styler.export RT03\ + -i pandas.io.formats.style.Styler.format RT03\ + -i pandas.io.formats.style.Styler.format_index RT03\ + -i pandas.io.formats.style.Styler.from_custom_template SA01\ + -i pandas.io.formats.style.Styler.hide RT03,SA01\ + -i pandas.io.formats.style.Styler.highlight_between RT03\ + -i pandas.io.formats.style.Styler.highlight_max RT03\ + -i pandas.io.formats.style.Styler.highlight_min RT03\ + -i pandas.io.formats.style.Styler.highlight_null RT03\ + -i pandas.io.formats.style.Styler.highlight_quantile RT03\ + -i pandas.io.formats.style.Styler.map RT03\ + -i pandas.io.formats.style.Styler.map_index RT03\ + -i pandas.io.formats.style.Styler.relabel_index RT03\ + -i pandas.io.formats.style.Styler.set_caption RT03,SA01\ + -i pandas.io.formats.style.Styler.set_properties RT03,SA01\ + -i pandas.io.formats.style.Styler.set_sticky RT03,SA01\ + -i pandas.io.formats.style.Styler.set_table_attributes PR07,RT03\ + -i 
pandas.io.formats.style.Styler.set_table_styles RT03\ + -i pandas.io.formats.style.Styler.set_td_classes RT03\ + -i pandas.io.formats.style.Styler.set_tooltips RT03,SA01\ + -i pandas.io.formats.style.Styler.set_uuid PR07,RT03,SA01\ + -i pandas.io.formats.style.Styler.text_gradient RT03\ + -i pandas.io.formats.style.Styler.to_excel PR01\ + -i pandas.io.formats.style.Styler.to_string SA01\ + -i pandas.io.formats.style.Styler.use RT03\ + -i pandas.io.json.build_table_schema PR07,RT03,SA01\ + -i pandas.io.stata.StataReader.data_label SA01\ + -i pandas.io.stata.StataReader.value_labels RT03,SA01\ + -i pandas.io.stata.StataReader.variable_labels RT03,SA01\ + -i pandas.io.stata.StataWriter.write_file SA01\ + -i pandas.json_normalize RT03,SA01\ + -i pandas.merge PR07\ + -i pandas.merge_asof PR07,RT03\ + -i pandas.merge_ordered PR07\ + -i pandas.option_context SA01\ + -i pandas.period_range RT03,SA01\ + -i pandas.pivot PR07\ + -i pandas.pivot_table PR07\ + -i pandas.plotting.andrews_curves RT03,SA01\ + -i pandas.plotting.autocorrelation_plot RT03,SA01\ + -i pandas.plotting.lag_plot RT03,SA01\ + -i pandas.plotting.parallel_coordinates PR07,RT03,SA01\ + -i pandas.plotting.plot_params SA01\ + -i pandas.plotting.scatter_matrix PR07,SA01\ + -i pandas.plotting.table PR07,RT03,SA01\ + -i pandas.qcut PR07,SA01\ + -i pandas.read_feather SA01\ + -i pandas.read_orc SA01\ + -i pandas.read_sas SA01\ + -i pandas.read_spss SA01\ + -i pandas.reset_option SA01\ + -i pandas.set_eng_float_format RT03,SA01\ + -i pandas.set_option SA01\ + -i pandas.show_versions SA01\ + -i pandas.test SA01\ + -i pandas.testing.assert_extension_array_equal SA01\ + -i pandas.testing.assert_index_equal PR07,SA01\ + -i pandas.testing.assert_series_equal PR07,SA01\ + -i pandas.timedelta_range SA01\ + -i pandas.tseries.api.guess_datetime_format SA01\ + -i pandas.tseries.offsets.BDay PR02,SA01\ + -i pandas.tseries.offsets.BMonthBegin PR02\ + -i pandas.tseries.offsets.BMonthEnd PR02\ + -i 
pandas.tseries.offsets.BQuarterBegin PR02\ + -i pandas.tseries.offsets.BQuarterBegin.copy SA01\ + -i pandas.tseries.offsets.BQuarterBegin.freqstr SA01\ + -i pandas.tseries.offsets.BQuarterBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.BQuarterBegin.kwds SA01\ + -i pandas.tseries.offsets.BQuarterBegin.n GL08\ + -i pandas.tseries.offsets.BQuarterBegin.name SA01\ + -i pandas.tseries.offsets.BQuarterBegin.nanos GL08\ + -i pandas.tseries.offsets.BQuarterBegin.normalize GL08\ + -i pandas.tseries.offsets.BQuarterBegin.rule_code GL08\ + -i pandas.tseries.offsets.BQuarterBegin.startingMonth GL08\ + -i pandas.tseries.offsets.BQuarterEnd PR02\ + -i pandas.tseries.offsets.BQuarterEnd.copy SA01\ + -i pandas.tseries.offsets.BQuarterEnd.freqstr SA01\ + -i pandas.tseries.offsets.BQuarterEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.BQuarterEnd.kwds SA01\ + -i pandas.tseries.offsets.BQuarterEnd.n GL08\ + -i pandas.tseries.offsets.BQuarterEnd.name SA01\ + -i pandas.tseries.offsets.BQuarterEnd.nanos GL08\ + -i pandas.tseries.offsets.BQuarterEnd.normalize GL08\ + -i pandas.tseries.offsets.BQuarterEnd.rule_code GL08\ + -i pandas.tseries.offsets.BQuarterEnd.startingMonth GL08\ + -i pandas.tseries.offsets.BYearBegin PR02\ + -i pandas.tseries.offsets.BYearBegin.copy SA01\ + -i pandas.tseries.offsets.BYearBegin.freqstr SA01\ + -i pandas.tseries.offsets.BYearBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.BYearBegin.kwds SA01\ + -i pandas.tseries.offsets.BYearBegin.month GL08\ + -i pandas.tseries.offsets.BYearBegin.n GL08\ + -i pandas.tseries.offsets.BYearBegin.name SA01\ + -i pandas.tseries.offsets.BYearBegin.nanos GL08\ + -i pandas.tseries.offsets.BYearBegin.normalize GL08\ + -i pandas.tseries.offsets.BYearBegin.rule_code GL08\ + -i pandas.tseries.offsets.BYearEnd PR02\ + -i pandas.tseries.offsets.BYearEnd.copy SA01\ + -i pandas.tseries.offsets.BYearEnd.freqstr SA01\ + -i pandas.tseries.offsets.BYearEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.BYearEnd.kwds SA01\ + 
-i pandas.tseries.offsets.BYearEnd.month GL08\ + -i pandas.tseries.offsets.BYearEnd.n GL08\ + -i pandas.tseries.offsets.BYearEnd.name SA01\ + -i pandas.tseries.offsets.BYearEnd.nanos GL08\ + -i pandas.tseries.offsets.BYearEnd.normalize GL08\ + -i pandas.tseries.offsets.BYearEnd.rule_code GL08\ + -i pandas.tseries.offsets.BusinessDay PR02,SA01\ + -i pandas.tseries.offsets.BusinessDay.calendar GL08\ + -i pandas.tseries.offsets.BusinessDay.copy SA01\ + -i pandas.tseries.offsets.BusinessDay.freqstr SA01\ + -i pandas.tseries.offsets.BusinessDay.holidays GL08\ + -i pandas.tseries.offsets.BusinessDay.is_on_offset GL08\ + -i pandas.tseries.offsets.BusinessDay.kwds SA01\ + -i pandas.tseries.offsets.BusinessDay.n GL08\ + -i pandas.tseries.offsets.BusinessDay.name SA01\ + -i pandas.tseries.offsets.BusinessDay.nanos GL08\ + -i pandas.tseries.offsets.BusinessDay.normalize GL08\ + -i pandas.tseries.offsets.BusinessDay.rule_code GL08\ + -i pandas.tseries.offsets.BusinessDay.weekmask GL08\ + -i pandas.tseries.offsets.BusinessHour PR02,SA01\ + -i pandas.tseries.offsets.BusinessHour.calendar GL08\ + -i pandas.tseries.offsets.BusinessHour.copy SA01\ + -i pandas.tseries.offsets.BusinessHour.end GL08\ + -i pandas.tseries.offsets.BusinessHour.freqstr SA01\ + -i pandas.tseries.offsets.BusinessHour.holidays GL08\ + -i pandas.tseries.offsets.BusinessHour.is_on_offset GL08\ + -i pandas.tseries.offsets.BusinessHour.kwds SA01\ + -i pandas.tseries.offsets.BusinessHour.n GL08\ + -i pandas.tseries.offsets.BusinessHour.name SA01\ + -i pandas.tseries.offsets.BusinessHour.nanos GL08\ + -i pandas.tseries.offsets.BusinessHour.normalize GL08\ + -i pandas.tseries.offsets.BusinessHour.rule_code GL08\ + -i pandas.tseries.offsets.BusinessHour.start GL08\ + -i pandas.tseries.offsets.BusinessHour.weekmask GL08\ + -i pandas.tseries.offsets.BusinessMonthBegin PR02\ + -i pandas.tseries.offsets.BusinessMonthBegin.copy SA01\ + -i pandas.tseries.offsets.BusinessMonthBegin.freqstr SA01\ + -i 
pandas.tseries.offsets.BusinessMonthBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.BusinessMonthBegin.kwds SA01\ + -i pandas.tseries.offsets.BusinessMonthBegin.n GL08\ + -i pandas.tseries.offsets.BusinessMonthBegin.name SA01\ + -i pandas.tseries.offsets.BusinessMonthBegin.nanos GL08\ + -i pandas.tseries.offsets.BusinessMonthBegin.normalize GL08\ + -i pandas.tseries.offsets.BusinessMonthBegin.rule_code GL08\ + -i pandas.tseries.offsets.BusinessMonthEnd PR02\ + -i pandas.tseries.offsets.BusinessMonthEnd.copy SA01\ + -i pandas.tseries.offsets.BusinessMonthEnd.freqstr SA01\ + -i pandas.tseries.offsets.BusinessMonthEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.BusinessMonthEnd.kwds SA01\ + -i pandas.tseries.offsets.BusinessMonthEnd.n GL08\ + -i pandas.tseries.offsets.BusinessMonthEnd.name SA01\ + -i pandas.tseries.offsets.BusinessMonthEnd.nanos GL08\ + -i pandas.tseries.offsets.BusinessMonthEnd.normalize GL08\ + -i pandas.tseries.offsets.BusinessMonthEnd.rule_code GL08\ + -i pandas.tseries.offsets.CBMonthBegin PR02\ + -i pandas.tseries.offsets.CBMonthEnd PR02\ + -i pandas.tseries.offsets.CDay PR02,SA01\ + -i pandas.tseries.offsets.CustomBusinessDay PR02,SA01\ + -i pandas.tseries.offsets.CustomBusinessDay.calendar GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.copy SA01\ + -i pandas.tseries.offsets.CustomBusinessDay.freqstr SA01\ + -i pandas.tseries.offsets.CustomBusinessDay.holidays GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.is_on_offset GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.kwds SA01\ + -i pandas.tseries.offsets.CustomBusinessDay.n GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.name SA01\ + -i pandas.tseries.offsets.CustomBusinessDay.nanos GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.normalize GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.rule_code GL08\ + -i pandas.tseries.offsets.CustomBusinessDay.weekmask GL08\ + -i pandas.tseries.offsets.CustomBusinessHour PR02,SA01\ + -i 
pandas.tseries.offsets.CustomBusinessHour.calendar GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.copy SA01\ + -i pandas.tseries.offsets.CustomBusinessHour.end GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.freqstr SA01\ + -i pandas.tseries.offsets.CustomBusinessHour.holidays GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.is_on_offset GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.kwds SA01\ + -i pandas.tseries.offsets.CustomBusinessHour.n GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.name SA01\ + -i pandas.tseries.offsets.CustomBusinessHour.nanos GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.normalize GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.rule_code GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.start GL08\ + -i pandas.tseries.offsets.CustomBusinessHour.weekmask GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin PR02\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.calendar GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.copy SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.freqstr SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.holidays GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.is_on_offset SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.kwds SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.m_offset GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.n GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.name SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.nanos GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.normalize GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.rule_code GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthBegin.weekmask GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd PR02\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.calendar GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.copy SA01\ + -i 
pandas.tseries.offsets.CustomBusinessMonthEnd.freqstr SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.holidays GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.is_on_offset SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.kwds SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.m_offset GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.n GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.name SA01\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.nanos GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.normalize GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.rule_code GL08\ + -i pandas.tseries.offsets.CustomBusinessMonthEnd.weekmask GL08\ + -i pandas.tseries.offsets.DateOffset PR02\ + -i pandas.tseries.offsets.DateOffset.copy SA01\ + -i pandas.tseries.offsets.DateOffset.freqstr SA01\ + -i pandas.tseries.offsets.DateOffset.is_on_offset GL08\ + -i pandas.tseries.offsets.DateOffset.kwds SA01\ + -i pandas.tseries.offsets.DateOffset.n GL08\ + -i pandas.tseries.offsets.DateOffset.name SA01\ + -i pandas.tseries.offsets.DateOffset.nanos GL08\ + -i pandas.tseries.offsets.DateOffset.normalize GL08\ + -i pandas.tseries.offsets.DateOffset.rule_code GL08\ + -i pandas.tseries.offsets.Day PR02\ + -i pandas.tseries.offsets.Day.copy SA01\ + -i pandas.tseries.offsets.Day.delta GL08\ + -i pandas.tseries.offsets.Day.freqstr SA01\ + -i pandas.tseries.offsets.Day.is_on_offset GL08\ + -i pandas.tseries.offsets.Day.kwds SA01\ + -i pandas.tseries.offsets.Day.n GL08\ + -i pandas.tseries.offsets.Day.name SA01\ + -i pandas.tseries.offsets.Day.nanos SA01\ + -i pandas.tseries.offsets.Day.normalize GL08\ + -i pandas.tseries.offsets.Day.rule_code GL08\ + -i pandas.tseries.offsets.Easter PR02\ + -i pandas.tseries.offsets.Easter.copy SA01\ + -i pandas.tseries.offsets.Easter.freqstr SA01\ + -i pandas.tseries.offsets.Easter.is_on_offset GL08\ + -i pandas.tseries.offsets.Easter.kwds SA01\ + -i pandas.tseries.offsets.Easter.n GL08\ + -i 
pandas.tseries.offsets.Easter.name SA01\ + -i pandas.tseries.offsets.Easter.nanos GL08\ + -i pandas.tseries.offsets.Easter.normalize GL08\ + -i pandas.tseries.offsets.Easter.rule_code GL08\ + -i pandas.tseries.offsets.FY5253 PR02\ + -i pandas.tseries.offsets.FY5253.copy SA01\ + -i pandas.tseries.offsets.FY5253.freqstr SA01\ + -i pandas.tseries.offsets.FY5253.get_rule_code_suffix GL08\ + -i pandas.tseries.offsets.FY5253.get_year_end GL08\ + -i pandas.tseries.offsets.FY5253.is_on_offset GL08\ + -i pandas.tseries.offsets.FY5253.kwds SA01\ + -i pandas.tseries.offsets.FY5253.n GL08\ + -i pandas.tseries.offsets.FY5253.name SA01\ + -i pandas.tseries.offsets.FY5253.nanos GL08\ + -i pandas.tseries.offsets.FY5253.normalize GL08\ + -i pandas.tseries.offsets.FY5253.rule_code GL08\ + -i pandas.tseries.offsets.FY5253.startingMonth GL08\ + -i pandas.tseries.offsets.FY5253.variation GL08\ + -i pandas.tseries.offsets.FY5253.weekday GL08\ + -i pandas.tseries.offsets.FY5253Quarter PR02\ + -i pandas.tseries.offsets.FY5253Quarter.copy SA01\ + -i pandas.tseries.offsets.FY5253Quarter.freqstr SA01\ + -i pandas.tseries.offsets.FY5253Quarter.get_rule_code_suffix GL08\ + -i pandas.tseries.offsets.FY5253Quarter.get_weeks GL08\ + -i pandas.tseries.offsets.FY5253Quarter.is_on_offset GL08\ + -i pandas.tseries.offsets.FY5253Quarter.kwds SA01\ + -i pandas.tseries.offsets.FY5253Quarter.n GL08\ + -i pandas.tseries.offsets.FY5253Quarter.name SA01\ + -i pandas.tseries.offsets.FY5253Quarter.nanos GL08\ + -i pandas.tseries.offsets.FY5253Quarter.normalize GL08\ + -i pandas.tseries.offsets.FY5253Quarter.qtr_with_extra_week GL08\ + -i pandas.tseries.offsets.FY5253Quarter.rule_code GL08\ + -i pandas.tseries.offsets.FY5253Quarter.startingMonth GL08\ + -i pandas.tseries.offsets.FY5253Quarter.variation GL08\ + -i pandas.tseries.offsets.FY5253Quarter.weekday GL08\ + -i pandas.tseries.offsets.FY5253Quarter.year_has_extra_week GL08\ + -i pandas.tseries.offsets.Hour PR02\ + -i pandas.tseries.offsets.Hour.copy 
SA01\ + -i pandas.tseries.offsets.Hour.delta GL08\ + -i pandas.tseries.offsets.Hour.freqstr SA01\ + -i pandas.tseries.offsets.Hour.is_on_offset GL08\ + -i pandas.tseries.offsets.Hour.kwds SA01\ + -i pandas.tseries.offsets.Hour.n GL08\ + -i pandas.tseries.offsets.Hour.name SA01\ + -i pandas.tseries.offsets.Hour.nanos SA01\ + -i pandas.tseries.offsets.Hour.normalize GL08\ + -i pandas.tseries.offsets.Hour.rule_code GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth PR02,SA01\ + -i pandas.tseries.offsets.LastWeekOfMonth.copy SA01\ + -i pandas.tseries.offsets.LastWeekOfMonth.freqstr SA01\ + -i pandas.tseries.offsets.LastWeekOfMonth.is_on_offset GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.kwds SA01\ + -i pandas.tseries.offsets.LastWeekOfMonth.n GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.name SA01\ + -i pandas.tseries.offsets.LastWeekOfMonth.nanos GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.normalize GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.rule_code GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.week GL08\ + -i pandas.tseries.offsets.LastWeekOfMonth.weekday GL08\ + -i pandas.tseries.offsets.Micro PR02\ + -i pandas.tseries.offsets.Micro.copy SA01\ + -i pandas.tseries.offsets.Micro.delta GL08\ + -i pandas.tseries.offsets.Micro.freqstr SA01\ + -i pandas.tseries.offsets.Micro.is_on_offset GL08\ + -i pandas.tseries.offsets.Micro.kwds SA01\ + -i pandas.tseries.offsets.Micro.n GL08\ + -i pandas.tseries.offsets.Micro.name SA01\ + -i pandas.tseries.offsets.Micro.nanos SA01\ + -i pandas.tseries.offsets.Micro.normalize GL08\ + -i pandas.tseries.offsets.Micro.rule_code GL08\ + -i pandas.tseries.offsets.Milli PR02\ + -i pandas.tseries.offsets.Milli.copy SA01\ + -i pandas.tseries.offsets.Milli.delta GL08\ + -i pandas.tseries.offsets.Milli.freqstr SA01\ + -i pandas.tseries.offsets.Milli.is_on_offset GL08\ + -i pandas.tseries.offsets.Milli.kwds SA01\ + -i pandas.tseries.offsets.Milli.n GL08\ + -i pandas.tseries.offsets.Milli.name SA01\ + -i 
pandas.tseries.offsets.Milli.nanos SA01\ + -i pandas.tseries.offsets.Milli.normalize GL08\ + -i pandas.tseries.offsets.Milli.rule_code GL08\ + -i pandas.tseries.offsets.Minute PR02\ + -i pandas.tseries.offsets.Minute.copy SA01\ + -i pandas.tseries.offsets.Minute.delta GL08\ + -i pandas.tseries.offsets.Minute.freqstr SA01\ + -i pandas.tseries.offsets.Minute.is_on_offset GL08\ + -i pandas.tseries.offsets.Minute.kwds SA01\ + -i pandas.tseries.offsets.Minute.n GL08\ + -i pandas.tseries.offsets.Minute.name SA01\ + -i pandas.tseries.offsets.Minute.nanos SA01\ + -i pandas.tseries.offsets.Minute.normalize GL08\ + -i pandas.tseries.offsets.Minute.rule_code GL08\ + -i pandas.tseries.offsets.MonthBegin PR02\ + -i pandas.tseries.offsets.MonthBegin.copy SA01\ + -i pandas.tseries.offsets.MonthBegin.freqstr SA01\ + -i pandas.tseries.offsets.MonthBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.MonthBegin.kwds SA01\ + -i pandas.tseries.offsets.MonthBegin.n GL08\ + -i pandas.tseries.offsets.MonthBegin.name SA01\ + -i pandas.tseries.offsets.MonthBegin.nanos GL08\ + -i pandas.tseries.offsets.MonthBegin.normalize GL08\ + -i pandas.tseries.offsets.MonthBegin.rule_code GL08\ + -i pandas.tseries.offsets.MonthEnd PR02\ + -i pandas.tseries.offsets.MonthEnd.copy SA01\ + -i pandas.tseries.offsets.MonthEnd.freqstr SA01\ + -i pandas.tseries.offsets.MonthEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.MonthEnd.kwds SA01\ + -i pandas.tseries.offsets.MonthEnd.n GL08\ + -i pandas.tseries.offsets.MonthEnd.name SA01\ + -i pandas.tseries.offsets.MonthEnd.nanos GL08\ + -i pandas.tseries.offsets.MonthEnd.normalize GL08\ + -i pandas.tseries.offsets.MonthEnd.rule_code GL08\ + -i pandas.tseries.offsets.Nano PR02\ + -i pandas.tseries.offsets.Nano.copy SA01\ + -i pandas.tseries.offsets.Nano.delta GL08\ + -i pandas.tseries.offsets.Nano.freqstr SA01\ + -i pandas.tseries.offsets.Nano.is_on_offset GL08\ + -i pandas.tseries.offsets.Nano.kwds SA01\ + -i pandas.tseries.offsets.Nano.n GL08\ + -i 
pandas.tseries.offsets.Nano.name SA01\ + -i pandas.tseries.offsets.Nano.nanos SA01\ + -i pandas.tseries.offsets.Nano.normalize GL08\ + -i pandas.tseries.offsets.Nano.rule_code GL08\ + -i pandas.tseries.offsets.QuarterBegin PR02\ + -i pandas.tseries.offsets.QuarterBegin.copy SA01\ + -i pandas.tseries.offsets.QuarterBegin.freqstr SA01\ + -i pandas.tseries.offsets.QuarterBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.QuarterBegin.kwds SA01\ + -i pandas.tseries.offsets.QuarterBegin.n GL08\ + -i pandas.tseries.offsets.QuarterBegin.name SA01\ + -i pandas.tseries.offsets.QuarterBegin.nanos GL08\ + -i pandas.tseries.offsets.QuarterBegin.normalize GL08\ + -i pandas.tseries.offsets.QuarterBegin.rule_code GL08\ + -i pandas.tseries.offsets.QuarterBegin.startingMonth GL08\ + -i pandas.tseries.offsets.QuarterEnd PR02\ + -i pandas.tseries.offsets.QuarterEnd.copy SA01\ + -i pandas.tseries.offsets.QuarterEnd.freqstr SA01\ + -i pandas.tseries.offsets.QuarterEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.QuarterEnd.kwds SA01\ + -i pandas.tseries.offsets.QuarterEnd.n GL08\ + -i pandas.tseries.offsets.QuarterEnd.name SA01\ + -i pandas.tseries.offsets.QuarterEnd.nanos GL08\ + -i pandas.tseries.offsets.QuarterEnd.normalize GL08\ + -i pandas.tseries.offsets.QuarterEnd.rule_code GL08\ + -i pandas.tseries.offsets.QuarterEnd.startingMonth GL08\ + -i pandas.tseries.offsets.Second PR02\ + -i pandas.tseries.offsets.Second.copy SA01\ + -i pandas.tseries.offsets.Second.delta GL08\ + -i pandas.tseries.offsets.Second.freqstr SA01\ + -i pandas.tseries.offsets.Second.is_on_offset GL08\ + -i pandas.tseries.offsets.Second.kwds SA01\ + -i pandas.tseries.offsets.Second.n GL08\ + -i pandas.tseries.offsets.Second.name SA01\ + -i pandas.tseries.offsets.Second.nanos SA01\ + -i pandas.tseries.offsets.Second.normalize GL08\ + -i pandas.tseries.offsets.Second.rule_code GL08\ + -i pandas.tseries.offsets.SemiMonthBegin PR02,SA01\ + -i pandas.tseries.offsets.SemiMonthBegin.copy SA01\ + -i 
pandas.tseries.offsets.SemiMonthBegin.day_of_month GL08\ + -i pandas.tseries.offsets.SemiMonthBegin.freqstr SA01\ + -i pandas.tseries.offsets.SemiMonthBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.SemiMonthBegin.kwds SA01\ + -i pandas.tseries.offsets.SemiMonthBegin.n GL08\ + -i pandas.tseries.offsets.SemiMonthBegin.name SA01\ + -i pandas.tseries.offsets.SemiMonthBegin.nanos GL08\ + -i pandas.tseries.offsets.SemiMonthBegin.normalize GL08\ + -i pandas.tseries.offsets.SemiMonthBegin.rule_code GL08\ + -i pandas.tseries.offsets.SemiMonthEnd PR02,SA01\ + -i pandas.tseries.offsets.SemiMonthEnd.copy SA01\ + -i pandas.tseries.offsets.SemiMonthEnd.day_of_month GL08\ + -i pandas.tseries.offsets.SemiMonthEnd.freqstr SA01\ + -i pandas.tseries.offsets.SemiMonthEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.SemiMonthEnd.kwds SA01\ + -i pandas.tseries.offsets.SemiMonthEnd.n GL08\ + -i pandas.tseries.offsets.SemiMonthEnd.name SA01\ + -i pandas.tseries.offsets.SemiMonthEnd.nanos GL08\ + -i pandas.tseries.offsets.SemiMonthEnd.normalize GL08\ + -i pandas.tseries.offsets.SemiMonthEnd.rule_code GL08\ + -i pandas.tseries.offsets.Tick GL08\ + -i pandas.tseries.offsets.Tick.copy SA01\ + -i pandas.tseries.offsets.Tick.delta GL08\ + -i pandas.tseries.offsets.Tick.freqstr SA01\ + -i pandas.tseries.offsets.Tick.is_on_offset GL08\ + -i pandas.tseries.offsets.Tick.kwds SA01\ + -i pandas.tseries.offsets.Tick.n GL08\ + -i pandas.tseries.offsets.Tick.name SA01\ + -i pandas.tseries.offsets.Tick.nanos SA01\ + -i pandas.tseries.offsets.Tick.normalize GL08\ + -i pandas.tseries.offsets.Tick.rule_code GL08\ + -i pandas.tseries.offsets.Week PR02\ + -i pandas.tseries.offsets.Week.copy SA01\ + -i pandas.tseries.offsets.Week.freqstr SA01\ + -i pandas.tseries.offsets.Week.is_on_offset GL08\ + -i pandas.tseries.offsets.Week.kwds SA01\ + -i pandas.tseries.offsets.Week.n GL08\ + -i pandas.tseries.offsets.Week.name SA01\ + -i pandas.tseries.offsets.Week.nanos GL08\ + -i 
pandas.tseries.offsets.Week.normalize GL08\ + -i pandas.tseries.offsets.Week.rule_code GL08\ + -i pandas.tseries.offsets.Week.weekday GL08\ + -i pandas.tseries.offsets.WeekOfMonth PR02,SA01\ + -i pandas.tseries.offsets.WeekOfMonth.copy SA01\ + -i pandas.tseries.offsets.WeekOfMonth.freqstr SA01\ + -i pandas.tseries.offsets.WeekOfMonth.is_on_offset GL08\ + -i pandas.tseries.offsets.WeekOfMonth.kwds SA01\ + -i pandas.tseries.offsets.WeekOfMonth.n GL08\ + -i pandas.tseries.offsets.WeekOfMonth.name SA01\ + -i pandas.tseries.offsets.WeekOfMonth.nanos GL08\ + -i pandas.tseries.offsets.WeekOfMonth.normalize GL08\ + -i pandas.tseries.offsets.WeekOfMonth.rule_code GL08\ + -i pandas.tseries.offsets.WeekOfMonth.week GL08\ + -i pandas.tseries.offsets.WeekOfMonth.weekday GL08\ + -i pandas.tseries.offsets.YearBegin PR02\ + -i pandas.tseries.offsets.YearBegin.copy SA01\ + -i pandas.tseries.offsets.YearBegin.freqstr SA01\ + -i pandas.tseries.offsets.YearBegin.is_on_offset GL08\ + -i pandas.tseries.offsets.YearBegin.kwds SA01\ + -i pandas.tseries.offsets.YearBegin.month GL08\ + -i pandas.tseries.offsets.YearBegin.n GL08\ + -i pandas.tseries.offsets.YearBegin.name SA01\ + -i pandas.tseries.offsets.YearBegin.nanos GL08\ + -i pandas.tseries.offsets.YearBegin.normalize GL08\ + -i pandas.tseries.offsets.YearBegin.rule_code GL08\ + -i pandas.tseries.offsets.YearEnd PR02\ + -i pandas.tseries.offsets.YearEnd.copy SA01\ + -i pandas.tseries.offsets.YearEnd.freqstr SA01\ + -i pandas.tseries.offsets.YearEnd.is_on_offset GL08\ + -i pandas.tseries.offsets.YearEnd.kwds SA01\ + -i pandas.tseries.offsets.YearEnd.month GL08\ + -i pandas.tseries.offsets.YearEnd.n GL08\ + -i pandas.tseries.offsets.YearEnd.name SA01\ + -i pandas.tseries.offsets.YearEnd.nanos GL08\ + -i pandas.tseries.offsets.YearEnd.normalize GL08\ + -i pandas.tseries.offsets.YearEnd.rule_code GL08\ + -i pandas.unique PR07\ + -i pandas.util.hash_array PR07,SA01\ + -i pandas.util.hash_pandas_object PR07,SA01 # There should be no 
backslash in the final line, please keep this comment in the last ignored function + + RET=$(($RET + $?)) ; echo $MSG "DONE" fi diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index 73bfb12316dc5..72d5c03ab724f 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -255,29 +255,28 @@ def test_validate_all_ignore_errors(self, monkeypatch): ], ) - exit_status_ignore_func = validate_docstrings.print_validate_all_results( + exit_status = validate_docstrings.print_validate_all_results( output_format="default", prefix=None, - errors=["ER01", "ER02"], ignore_deprecated=False, - ignore_errors={ - "pandas.DataFrame.align": ["ER01"], - # ignoring an error that is not requested should be of no effect - "pandas.Index.all": ["ER03"] - } + ignore_errors={"*": {"ER03"}}, ) + # two functions * two not ignored errors + assert exit_status == 2 * 2 + exit_status = validate_docstrings.print_validate_all_results( output_format="default", prefix=None, - errors=["ER01", "ER02"], ignore_deprecated=False, - ignore_errors=None + ignore_errors={ + "*": {"ER03"}, + "pandas.DataFrame.align": {"ER01"}, + # ignoring an error that is not requested should be of no effect + "pandas.Index.all": {"ER03"} + } ) - - # we have 2 error codes activated out of the 3 available in the validate results - # one run has a function to ignore, the other does not - assert exit_status == 2*2 - assert exit_status_ignore_func == exit_status - 1 + # two functions * two not global ignored errors - one function ignored error + assert exit_status == 2 * 2 - 1 @@ -399,11 +398,10 @@ def test_exit_status_for_main(self, monkeypatch) -> None: func_name="docstring1", prefix=None, output_format="default", - errors=[], ignore_deprecated=False, ignore_errors=None, ) - assert exit_status == 0 + assert exit_status == 3 def test_exit_status_errors_for_validate_all(self, monkeypatch) -> None: monkeypatch.setattr( @@ -430,7 +428,6 @@ 
def test_exit_status_errors_for_validate_all(self, monkeypatch) -> None: func_name=None, prefix=None, output_format="default", - errors=[], ignore_deprecated=False, ignore_errors=None, ) @@ -449,7 +446,6 @@ def test_no_exit_status_noerrors_for_validate_all(self, monkeypatch) -> None: func_name=None, output_format="default", prefix=None, - errors=[], ignore_deprecated=False, ignore_errors=None, ) @@ -474,7 +470,6 @@ def test_exit_status_for_validate_all_json(self, monkeypatch) -> None: func_name=None, output_format="json", prefix=None, - errors=[], ignore_deprecated=False, ignore_errors=None, ) @@ -519,18 +514,16 @@ def test_errors_param_filters_errors(self, monkeypatch) -> None: func_name=None, output_format="default", prefix=None, - errors=["ER01"], ignore_deprecated=False, - ignore_errors=None, + ignore_errors={"*": {"ER02", "ER03"}}, ) assert exit_status == 3 exit_status = validate_docstrings.main( func_name=None, - prefix=None, output_format="default", - errors=["ER03"], + prefix=None, ignore_deprecated=False, - ignore_errors=None, + ignore_errors={"*": {"ER01", "ER02"}}, ) assert exit_status == 1 diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index b42deff66f546..0057f97ffa211 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -16,6 +16,7 @@ from __future__ import annotations import argparse +import collections import doctest import importlib import json @@ -65,6 +66,10 @@ "EX04": "Do not import {imported_library}, as it is imported " "automatically for the examples (numpy as np, pandas as pd)", } +ALL_ERRORS = set(NUMPYDOC_ERROR_MSGS).union(set(ERROR_MSGS)) +duplicated_errors = set(NUMPYDOC_ERROR_MSGS).intersection(set(ERROR_MSGS)) +assert not duplicated_errors, (f"Errors {duplicated_errors} exist in both pandas " + "and numpydoc, should they be removed from pandas?") def pandas_error(code, **kwargs): @@ -340,9 +345,8 @@ def get_all_api_items(): def print_validate_all_results( output_format: str, 
prefix: str | None, - errors: list[str] | None, ignore_deprecated: bool, - ignore_errors: dict[str, list[str]] | None, + ignore_errors: dict[str, set[str]], ): if output_format not in ("default", "json", "actions"): raise ValueError(f'Unknown output_format "{output_format}"') @@ -358,22 +362,28 @@ def print_validate_all_results( prefix = "##[error]" if output_format == "actions" else "" exit_status = 0 for func_name, res in result.items(): - for err_code, err_desc in res["errors"]: - is_not_requested_error = errors and err_code not in errors - is_ignored_error = err_code in ignore_errors.get(func_name, []) - if is_not_requested_error or is_ignored_error: - continue - + error_messages = dict(res["errors"]) + actual_failures = set(error_messages) + expected_failures = (ignore_errors.get(func_name, set()) + | ignore_errors.get("*", set())) + for err_code in actual_failures - expected_failures: + sys.stdout.write( + f'{prefix}{res["file"]}:{res["file_line"]}:' + f'{err_code}:{func_name}:{error_messages[err_code]}\n' + ) + exit_status += 1 + for err_code in ignore_errors.get(func_name, set()) - actual_failures: sys.stdout.write( f'{prefix}{res["file"]}:{res["file_line"]}:' - f"{err_code}:{func_name}:{err_desc}\n" + f"{err_code}:{func_name}:" + "EXPECTED TO FAIL, BUT NOT FAILING\n" ) exit_status += 1 return exit_status -def print_validate_one_results(func_name: str) -> None: +def print_validate_one_results(func_name: str) -> int: def header(title, width=80, char="#") -> str: full_line = char * width side_len = (width - len(title) - 2) // 2 @@ -399,20 +409,45 @@ def header(title, width=80, char="#") -> str: sys.stderr.write(header("Doctests")) sys.stderr.write(result["examples_errs"]) + return len(result["errors"]) + len(result["examples_errs"]) + + +def _format_ignore_errors(raw_ignore_errors): + ignore_errors = collections.defaultdict(set) + if raw_ignore_errors: + for obj_name, error_codes in raw_ignore_errors: + # function errors "pandas.Series PR01,SA01" + if 
obj_name != "*": + if obj_name in ignore_errors: + raise ValueError( + f"Object `{obj_name}` is present in more than one " + "--ignore_errors argument. Please use it once and specify " + "the errors separated by commas.") + ignore_errors[obj_name] = set(error_codes.split(",")) + + unknown_errors = ignore_errors[obj_name] - ALL_ERRORS + if unknown_errors: + raise ValueError( + f"Object `{obj_name}` is ignoring errors {unknown_errors} " + f"which are not known. Known errors are: {ALL_ERRORS}") + + # global errors "PR02,ES01" + else: + ignore_errors["*"].update(set(error_codes.split(","))) + + unknown_errors = ignore_errors["*"] - ALL_ERRORS + if unknown_errors: + raise ValueError( + f"Unknown errors {unknown_errors} specified using --ignore_errors " + "Known errors are: {ALL_ERRORS}") -def validate_error_codes(errors): - overlapped_errors = set(NUMPYDOC_ERROR_MSGS).intersection(set(ERROR_MSGS)) - assert not overlapped_errors, f"{overlapped_errors} is overlapped." - all_errors = set(NUMPYDOC_ERROR_MSGS).union(set(ERROR_MSGS)) - nonexistent_errors = set(errors) - all_errors - assert not nonexistent_errors, f"{nonexistent_errors} don't exist." + return ignore_errors def main( func_name, output_format, prefix, - errors, ignore_deprecated, ignore_errors ): @@ -420,31 +455,14 @@ def main( Main entry point. Call the validation for one or for all docstrings. 
""" if func_name is None: - error_str = ", ".join(errors) - msg = f"Validate docstrings ({error_str})\n" - else: - msg = f"Validate docstring in function {func_name}\n" - sys.stdout.write(msg) - - validate_error_codes(errors) - if ignore_errors is not None: - for error_codes in ignore_errors.values(): - validate_error_codes(error_codes) - - if func_name is None: - exit_status = print_validate_all_results( + return print_validate_all_results( output_format, prefix, - errors, ignore_deprecated, ignore_errors ) else: - print_validate_one_results(func_name) - exit_status = 0 - sys.stdout.write(msg + "DONE" + os.linesep) - - return exit_status + return print_validate_one_results(func_name) if __name__ == "__main__": @@ -474,14 +492,6 @@ def main( "of methods starting by this pattern. It is " "ignored if parameter function is provided", ) - argparser.add_argument( - "--errors", - default=None, - help="comma separated " - "list of error codes to validate. By default it " - "validates all errors (ignored when validating " - "a single docstring)", - ) argparser.add_argument( "--ignore_deprecated", default=False, @@ -492,6 +502,7 @@ def main( ) argparser.add_argument( "--ignore_errors", + "-i", default=None, action="append", nargs=2, @@ -504,18 +515,11 @@ def main( ) args = argparser.parse_args(sys.argv[1:]) - args.errors = args.errors.split(",") if args.errors else None - if args.ignore_errors: - args.ignore_errors = {function: error_codes.split(",") - for function, error_codes - in args.ignore_errors} - sys.exit( main(args.function, args.format, args.prefix, - args.errors, args.ignore_deprecated, - args.ignore_errors + _format_ignore_errors(args.ignore_errors), ) )
Making the validation of docstrings more robust, main changes: - If we ignore an error that doesn't fail, the CI will report it and break (there were 30 errors being ignored that are already fixed and removed from the list of ignores here) - Instead of specifying all the errors to validate (that are all but one now) we can specify errors to skip (there is one only that we skip, the lack of an extended summary in a docstring). This simplifies the script a bit - In case an unknown error code is used when ignoring errors, the message should be more descriptive - I created an alias for `--ignore_errors` as `-i`, so the list of errors to ignore is a bit easier to read CC: @dontgoto @jordan-d-murphy
https://api.github.com/repos/pandas-dev/pandas/pulls/57879
2024-03-18T01:30:35Z
2024-03-19T01:05:39Z
2024-03-19T01:05:39Z
2024-03-29T06:48:04Z
CI: make docstring checks "instantaneous"
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index 73bfb12316dc5..ed9b52557b28f 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -90,6 +90,69 @@ def leftover_files(self) -> None: """ +def _print_validate_all_base(monkeypatch, + prefix=None, + ignore_deprecated=False, + ignore_errors=None, + overwrite_api_items=True + ) -> tuple[int, int]: + dummy_docinfo = { + "docstring": "docstring1", + "errors": [ + ("ER01", "err desc"), + ("ER02", "err desc"), + ("ER03", "err desc") + ], + "warnings": [], + "deprecated": True, + "file": "file1", + "file_line": "file_line1" + } + monkeypatch.setattr( + validate_docstrings, + "pandas_validate", + lambda func_names: { + func_name: dummy_docinfo + for func_name in func_names}, + ) + if overwrite_api_items: + monkeypatch.setattr( + validate_docstrings, + "get_all_api_items", + lambda: [ + ( + "pandas.DataFrame.align", + "func", + "current_section", + "current_subsection", + ), + ( + "pandas.Index.all", + "func", + "current_section", + "current_subsection", + ), + ], + ) + + exit_status = validate_docstrings.print_validate_all_results( + output_format="default", + prefix=prefix, + errors=["ER01", "ER02"], + ignore_deprecated=ignore_deprecated, + ignore_errors=ignore_errors + ) + exit_status_reference = validate_docstrings.print_validate_all_results( + output_format="default", + prefix=None, + errors=["ER01", "ER02"], + ignore_deprecated=False, + ignore_errors=None + ) + + return exit_status_reference, exit_status + + class TestValidator: def _import_path(self, klass=None, func=None): """ @@ -118,11 +181,14 @@ def _import_path(self, klass=None, func=None): return base_path def test_bad_class(self, capsys) -> None: - errors = validate_docstrings.pandas_validate( + results = validate_docstrings.pandas_validate( self._import_path(klass="BadDocstrings") - )["errors"] - assert isinstance(errors, list) - assert errors + ) + assert 
len(results.keys()) == 1 + for docinfo in results.values(): + errors = docinfo["errors"] + assert errors + assert isinstance(errors, list) @pytest.mark.parametrize( "klass,func,msgs", @@ -193,92 +259,41 @@ def test_bad_class(self, capsys) -> None: ], ) def test_bad_docstrings(self, capsys, klass, func, msgs) -> None: - result = validate_docstrings.pandas_validate( + results = validate_docstrings.pandas_validate( self._import_path(klass=klass, func=func) ) - for msg in msgs: - assert msg in " ".join([err[1] for err in result["errors"]]) - - def test_validate_all_ignore_deprecated(self, monkeypatch) -> None: - monkeypatch.setattr( - validate_docstrings, - "pandas_validate", - lambda func_name: { - "docstring": "docstring1", - "errors": [ - ("ER01", "err desc"), - ("ER02", "err desc"), - ("ER03", "err desc"), - ], - "warnings": [], - "examples_errors": "", - "deprecated": True, - }, - ) - result = validate_docstrings.validate_all(prefix=None, ignore_deprecated=True) - assert len(result) == 0 + assert len(results.keys()) == 1 + for result in results.values(): + for msg in msgs: + assert msg in " ".join([err[1] for err in result["errors"]]) + + def test_print_validate_all_ignore_deprecated(self, monkeypatch) -> None: + status, status_ignore_depr = _print_validate_all_base(monkeypatch, + ignore_deprecated=True, + overwrite_api_items=False) + assert status_ignore_depr == 0 + assert status > 100 + assert status % 2 == 0 + + def test_validate_all_prefix(self, monkeypatch): + status, status_prefix = _print_validate_all_base(monkeypatch, + prefix="pandas.DataFrame") + # the two errors of pandas.Index shall not be counted + assert status_prefix == status - 2 def test_validate_all_ignore_errors(self, monkeypatch): - monkeypatch.setattr( - validate_docstrings, - "pandas_validate", - lambda func_name: { - "docstring": "docstring1", - "errors": [ - ("ER01", "err desc"), - ("ER02", "err desc"), - ("ER03", "err desc") - ], - "warnings": [], - "examples_errors": "", - "deprecated": 
True, - "file": "file1", - "file_line": "file_line1" - }, - ) - monkeypatch.setattr( - validate_docstrings, - "get_all_api_items", - lambda: [ - ( - "pandas.DataFrame.align", - "func", - "current_section", - "current_subsection", - ), - ( - "pandas.Index.all", - "func", - "current_section", - "current_subsection", - ), - ], - ) - - exit_status_ignore_func = validate_docstrings.print_validate_all_results( - output_format="default", - prefix=None, - errors=["ER01", "ER02"], - ignore_deprecated=False, - ignore_errors={ - "pandas.DataFrame.align": ["ER01"], - # ignoring an error that is not requested should be of no effect - "pandas.Index.all": ["ER03"] - } - ) - exit_status = validate_docstrings.print_validate_all_results( - output_format="default", - prefix=None, - errors=["ER01", "ER02"], - ignore_deprecated=False, - ignore_errors=None - ) + ignore_errs = { + "pandas.DataFrame.align": ["ER01"], + # ignoring an error that is not requested should be of no effect + "pandas.Index.all": ["ER03"] + } + status, status_ignore_func = _print_validate_all_base(monkeypatch, + ignore_errors=ignore_errs) # we have 2 error codes activated out of the 3 available in the validate results # one run has a function to ignore, the other does not - assert exit_status == 2*2 - assert exit_status_ignore_func == exit_status - 1 - + assert status == 2*2 + assert status_ignore_func == status - 1 class TestApiItems: @@ -375,7 +390,8 @@ class TestPandasDocstringClass: ) def test_encode_content_write_to_file(self, name) -> None: # GH25466 - docstr = validate_docstrings.PandasDocstring(name).validate_pep8() + docstr = validate_docstrings.PandasDocstring(name) + docstr = validate_docstrings.validate_pep8_for_examples(docstr)[docstr] # the list of pep8 errors should be empty assert not list(docstr) @@ -392,7 +408,6 @@ def test_exit_status_for_main(self, monkeypatch) -> None: ("ER02", "err desc"), ("ER03", "err desc"), ], - "examples_errs": "", }, ) exit_status = validate_docstrings.main( diff --git 
a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index b42deff66f546..f117324a571c3 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -56,14 +56,14 @@ PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"] ERROR_MSGS = { "GL04": "Private classes ({mentioned_private_classes}) should not be " - "mentioned in public docstrings", + "mentioned in public docstrings", "PD01": "Use 'array-like' rather than 'array_like' in docstrings.", "SA05": "{reference_name} in `See Also` section does not need `pandas` " - "prefix, use {right_reference} instead.", + "prefix, use {right_reference} instead.", "EX03": "flake8 error: line {line_number}, col {col_number}: {error_code} " - "{error_message}", + "{error_message}", "EX04": "Do not import {imported_library}, as it is imported " - "automatically for the examples (numpy as np, pandas as pd)", + "automatically for the examples (numpy as np, pandas as pd)", } @@ -148,6 +148,100 @@ def get_api_items(api_doc_fd): previous_line = line_stripped +def validate_pep8_for_examples(docs: list[PandasDocstring] | PandasDocstring + ) -> dict[PandasDocstring, list[tuple]]: + """ + Call the pep8 validation for docstrings with examples and add the found errors. + + Parameters + ---------- + docs : list[PandasDocString] + List of docstrings to validate. + + Returns + ------- + dict[PandasDocstring, list] + Dict of function names and the pep8 error messages found in their docstrings. + The errors messages are of the form + (error_code, message, line_number, col_number). 
+ """ + if isinstance(docs, PandasDocstring): + docs = [docs] + + with tempfile.TemporaryDirectory() as temp_dir: + doc_to_filename = {} + for doc in docs: + if not doc.examples: + continue + + # F401 is needed to not generate flake8 errors in examples + # that do not use numpy or pandas + content = "".join( + ( + "import numpy as np # noqa: F401\n", + "import pandas as pd # noqa: F401\n", + *doc.examples_source_code, + ) + ) + + temp_file = tempfile.NamedTemporaryFile(mode="w", + dir=temp_dir, + encoding="utf-8", + delete=False) + temp_file.write(content) + temp_file.flush() + doc_to_filename[doc] = temp_file.name + + # No docs with examples to process + if not doc_to_filename: + return {} + + cmd = [ + sys.executable, + "-m", + "flake8", + "--format=%(row)d\t%(col)d\t%(code)s\t%(text)s", + "--max-line-length=88", + "--ignore=E203,E3,W503,W504,E402,E731,E128,E124,E704", + ] + cmd.extend(doc_to_filename.values()) + response = subprocess.run(cmd, capture_output=True, check=False, + text=True) + + all_docs_error_messages = {doc: [] for doc in docs} + for doc, temp_file_name in doc_to_filename.items(): + # one output for each error, each error must be mapped to the func_name + for output in ("stdout", "stderr"): + out = getattr(response, output) + out = out.replace(temp_file_name, "").strip("\n").splitlines() + if out: + all_docs_error_messages[doc].extend(out) + + for doc, raw_error_messages in all_docs_error_messages.items(): + doc_error_messages = [] + for raw_error_message in raw_error_messages: + line_num, col_num, err_code, msg = raw_error_message.split("\t", maxsplit=3) + # Note: we subtract 2 from the line number because + # 'import numpy as np\nimport pandas as pd\n' + # is prepended to the docstrings. 
+ doc_error_messages.append( + ( + err_code, + msg, + int(line_num) - 2, + int(col_num) + ) + ) + all_docs_error_messages[doc] = doc_error_messages + + for doc in docs: + if doc.examples and doc not in all_docs_error_messages.keys(): + raise KeyError(f"Docstring\n###\n{doc}\n###\nhas examples but " + f"no pep8 validation results.") + + return all_docs_error_messages + + class PandasDocstring(Validator): def __init__(self, func_name: str, doc_obj=None) -> None: self.func_name = func_name @@ -168,119 +262,85 @@ def examples_source_code(self): lines = doctest.DocTestParser().get_examples(self.raw_doc) return [line.source for line in lines] - def validate_pep8(self): - if not self.examples: - return - - # F401 is needed to not generate flake8 errors in examples - # that do not user numpy or pandas - content = "".join( - ( - "import numpy as np # noqa: F401\n", - "import pandas as pd # noqa: F401\n", - *self.examples_source_code, - ) - ) - - error_messages = [] - - file = tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=False) - try: - file.write(content) - file.flush() - cmd = [ - sys.executable, - "-m", - "flake8", - "--format=%(row)d\t%(col)d\t%(code)s\t%(text)s", - "--max-line-length=88", - "--ignore=E203,E3,W503,W504,E402,E731,E128,E124,E704", - file.name, - ] - response = subprocess.run(cmd, capture_output=True, check=False, text=True) - for output in ("stdout", "stderr"): - out = getattr(response, output) - out = out.replace(file.name, "") - messages = out.strip("\n").splitlines() - if messages: - error_messages.extend(messages) - finally: - file.close() - os.unlink(file.name) - - for error_message in error_messages: - line_number, col_number, error_code, message = error_message.split( - "\t", maxsplit=3 - ) - # Note: we subtract 2 from the line number because - # 'import numpy as np\nimport pandas as pd\n' - # is prepended to the docstrings. 
- yield error_code, message, int(line_number) - 2, int(col_number) - def non_hyphenated_array_like(self): return "array_like" in self.raw_doc -def pandas_validate(func_name: str): +def pandas_validate(func_names: str | list[str]) -> dict[str, dict]: """ Call the numpydoc validation, and add the errors specific to pandas. Parameters ---------- - func_name : str - Name of the object of the docstring to validate. + func_names : list[str] + The names of the objects of the docstrings to validate. Returns ------- - dict - Information about the docstring and the errors found. + dict[str, dict] + For each function, information about the docstring and the errors found. """ - func_obj = Validator._load_obj(func_name) - # Some objects are instances, e.g. IndexSlice, which numpydoc can't validate - doc_obj = get_doc_object(func_obj, doc=func_obj.__doc__) - doc = PandasDocstring(func_name, doc_obj) - result = validate(doc_obj) - mentioned_errs = doc.mentioned_private_classes - if mentioned_errs: - result["errors"].append( - pandas_error("GL04", mentioned_private_classes=", ".join(mentioned_errs)) - ) - - if doc.see_also: - result["errors"].extend( - pandas_error( - "SA05", - reference_name=rel_name, - right_reference=rel_name[len("pandas."):], - ) - for rel_name in doc.see_also - if rel_name.startswith("pandas.") - ) - - result["examples_errs"] = "" - if doc.examples: - for error_code, error_message, line_number, col_number in doc.validate_pep8(): + if isinstance(func_names, str): + func_names = [func_names] + + docs_to_results = {} + for func_name in func_names: + func_obj = Validator._load_obj(func_name) + # Some objects are instances, e.g. 
IndexSlice, which numpydoc can't validate + doc_obj = get_doc_object(func_obj, doc=func_obj.__doc__) + doc = PandasDocstring(func_name, doc_obj) + result = validate(doc_obj) + docs_to_results[doc] = result + + # add errors not from examples to the result + for doc, result in docs_to_results.items(): + mentioned_errs = doc.mentioned_private_classes + if mentioned_errs: result["errors"].append( pandas_error( - "EX03", - error_code=error_code, - error_message=error_message, - line_number=line_number, - col_number=col_number, - ) + "GL04", + mentioned_private_classes=", ".join(mentioned_errs)) ) - examples_source_code = "".join(doc.examples_source_code) - result["errors"].extend( - pandas_error("EX04", imported_library=wrong_import) - for wrong_import in ("numpy", "pandas") - if f"import {wrong_import}" in examples_source_code - ) - if doc.non_hyphenated_array_like(): - result["errors"].append(pandas_error("PD01")) + if doc.see_also: + see_also_prefix_errors = [ + pandas_error("SA05", + reference_name=rel_name, + right_reference=rel_name[len("pandas."):], + ) + for rel_name in doc.see_also + if rel_name.startswith("pandas.") + ] + result["errors"].extend(see_also_prefix_errors) + + if doc.non_hyphenated_array_like(): + result["errors"].append(pandas_error("PD01")) + + pep8_results = validate_pep8_for_examples(list(docs_to_results.keys())) + + for doc, pep8_errors in pep8_results.items(): + result = docs_to_results[doc] + pep8_pandas_errors = [ + pandas_error( + "EX03", + error_code=err_code, + error_message=err_msg, + line_number=line_number, + col_number=col_number, + ) for err_code, err_msg, line_number, col_number in pep8_errors + ] + result["errors"].extend(pep8_pandas_errors) + examples_source_code = "".join(doc.examples_source_code) + import_errors = [pandas_error("EX04", imported_library=wrong_import) + for wrong_import in ("numpy", "pandas") + if f"import {wrong_import}" in examples_source_code] + result["errors"].extend(import_errors) plt.close("all") - return 
result + validation_results = {doc.func_name: result + for doc, result + in docs_to_results.items()} + return validation_results def validate_all(prefix, ignore_deprecated=False): @@ -305,10 +365,17 @@ def validate_all(prefix, ignore_deprecated=False): result = {} seen = {} - for func_name, _, section, subsection in get_all_api_items(): - if prefix and not func_name.startswith(prefix): - continue - doc_info = pandas_validate(func_name) + def matches_prefix(function_name): + return function_name.startswith(prefix) if prefix else True + + api_items = [api_item for api_item + in get_all_api_items() + if matches_prefix(api_item[0])] + func_names = [api_item[0] for api_item in api_items] + doc_infos = pandas_validate(func_names) + + for func_name, _, section, subsection in api_items: + doc_info = doc_infos[func_name] if ignore_deprecated and doc_info["deprecated"]: continue result[func_name] = doc_info @@ -343,7 +410,7 @@ def print_validate_all_results( errors: list[str] | None, ignore_deprecated: bool, ignore_errors: dict[str, list[str]] | None, -): +) -> int: if output_format not in ("default", "json", "actions"): raise ValueError(f'Unknown output_format "{output_format}"') if ignore_errors is None: @@ -395,10 +462,6 @@ def header(title, width=80, char="#") -> str: else: sys.stderr.write(f'Docstring for "{func_name}" correct. :)\n') - if result["examples_errs"]: - sys.stderr.write(header("Doctests")) - sys.stderr.write(result["examples_errs"]) - def validate_error_codes(errors): overlapped_errors = set(NUMPYDOC_ERROR_MSGS).intersection(set(ERROR_MSGS)) @@ -455,7 +518,11 @@ def main( "as JSON" ) argparser = argparse.ArgumentParser(description="validate pandas docstrings") - argparser.add_argument("function", nargs="?", default=None, help=func_help) + argparser.add_argument( + "function", + nargs="?", + default=None, + help=func_help) argparser.add_argument( "--format", default="default",
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. This PR reduces the `check_code.sh docstrings` runtime to under 10 seconds. The key changes are in [`validate_pep8_for_examples`](https://github.com/dontgoto/pandas/blob/14e3915f21dc35fcdcc5ce198b35245fb51a68dd/scripts/validate_docstrings.py#L151), which was refactored to call the pep8 CLI once with a list of files to analyse. The functions in the call stack above were changed to handle lists instead of single objects as well. I also added a new test since the `prefix` parameter was not properly tested before. Let me know if you'd rather see this split into multiple PRs. I'd also wait until #57879 is merged as there will be quite a few merge conflicts. Unrelated code checks seem to be failing here and in other PRs.
https://api.github.com/repos/pandas-dev/pandas/pulls/57878
2024-03-18T00:23:30Z
2024-03-19T18:27:43Z
null
2024-03-19T18:27:43Z
CLN: replace deprecated freqs `H`/`M` with `h`/`ME` in tests for plotting
diff --git a/pandas/tests/plotting/frame/test_frame_subplots.py b/pandas/tests/plotting/frame/test_frame_subplots.py index 16853114d93cd..511266d5786c5 100644 --- a/pandas/tests/plotting/frame/test_frame_subplots.py +++ b/pandas/tests/plotting/frame/test_frame_subplots.py @@ -187,9 +187,9 @@ def test_subplots_timeseries_y_axis_not_supported(self): data = { "numeric": np.array([1, 2, 5]), "period": [ - pd.Period("2017-08-01 00:00:00", freq="H"), - pd.Period("2017-08-01 02:00", freq="H"), - pd.Period("2017-08-02 00:00:00", freq="H"), + pd.Period("2017-08-01 00:00:00", freq="h"), + pd.Period("2017-08-01 02:00", freq="h"), + pd.Period("2017-08-02 00:00:00", freq="h"), ], "categorical": pd.Categorical( ["c", "b", "a"], categories=["a", "b", "c"], ordered=False diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 7164b7a046ff2..6b709522bab70 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -927,7 +927,7 @@ def test_mixed_freq_shared_ax_twin_x(self): @pytest.mark.xfail(reason="TODO (GH14330, GH14322)") def test_mixed_freq_shared_ax_twin_x_irregular_first(self): # GH13341, using sharex=True - idx1 = date_range("2015-01-01", periods=3, freq="M") + idx1 = date_range("2015-01-01", periods=3, freq="ME") idx2 = idx1[:1].union(idx1[2:]) s1 = Series(range(len(idx1)), idx1) s2 = Series(range(len(idx2)), idx2)
xref #52064, #54939 replaced deprecated frequencies `H` and `M` with correct `h` and `ME` in tests for plotting, marked as `@pytest.mark.xfail`
https://api.github.com/repos/pandas-dev/pandas/pulls/57877
2024-03-17T22:26:25Z
2024-03-21T10:06:31Z
2024-03-21T10:06:31Z
2024-03-21T10:06:31Z
ENH: add warning when export to tar, zip in append mode
diff --git a/pandas/io/common.py b/pandas/io/common.py index 35c3a24d8e8f6..4507a7d08c8ba 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -361,6 +361,16 @@ def _get_filepath_or_buffer( stacklevel=find_stack_level(), ) + if "a" in mode and compression_method in ["zip", "tar"]: + # GH56778 + warnings.warn( + "zip and tar do not support mode 'a' properly. " + "This combination will result in multiple files with same name " + "being added to the archive.", + RuntimeWarning, + stacklevel=find_stack_level(), + ) + # Use binary mode when converting path-like objects to file-like objects (fsspec) # except when text mode is explicitly requested. The original mode is returned if # fsspec is not used. diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py index 5b9ced8d47ed7..f87fa4137d62d 100644 --- a/pandas/tests/frame/methods/test_to_csv.py +++ b/pandas/tests/frame/methods/test_to_csv.py @@ -1405,3 +1405,20 @@ def test_to_csv_categorical_and_interval(self): expected_rows = [",a", '0,"[2020-01-01 00:00:00, 2020-01-02 00:00:00]"'] expected = tm.convert_rows_list_to_csv_str(expected_rows) assert result == expected + + def test_to_csv_warn_when_zip_tar_and_append_mode(self): + # GH57875 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + "zip and tar do not support mode 'a' properly. This combination will " + "result in multiple files with same name being added to the archive" + ) + with tm.assert_produces_warning( + RuntimeWarning, match=msg, raise_on_extra_warnings=False + ): + df.to_csv("test.zip", mode="a") + + with tm.assert_produces_warning( + RuntimeWarning, match=msg, raise_on_extra_warnings=False + ): + df.to_csv("test.tar", mode="a")
- [ ] closes #56778 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57875
2024-03-17T13:55:14Z
2024-04-01T18:24:34Z
2024-04-01T18:24:34Z
2024-04-01T18:24:41Z
DOC: add note to how pandas deduplicate header when read from file
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 1ef2e65617c9b..5e9e7d0cf5ba8 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -195,6 +195,12 @@ class _read_shared(TypedDict, Generic[HashableT], total=False): parameter ignores commented lines and empty lines if ``skip_blank_lines=True``, so ``header=0`` denotes the first line of data rather than the first line of the file. + + When inferred from the file contents, headers are kept distinct from + each other by renaming duplicate names with a numeric suffix of the form + ``".{{count}}"`` starting from 1, e.g. ``"foo"`` and ``"foo.1"``. + Empty headers are named ``"Unnamed: {{i}}"`` or ``"Unnamed: {{i}}_level_{{level}}"`` + in the case of MultiIndex columns. names : Sequence of Hashable, optional Sequence of column labels to apply. If the file contains a header row, then you should explicitly pass ``header=0`` to override the column names.
- [ ] closes #57792 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. <img width="904" alt="image" src="https://github.com/pandas-dev/pandas/assets/30631476/3d5ee26f-8b6c-4493-bcfd-5491cd68417d">
https://api.github.com/repos/pandas-dev/pandas/pulls/57874
2024-03-17T09:48:29Z
2024-04-01T18:42:54Z
2024-04-01T18:42:54Z
2024-04-01T18:43:01Z
DOC: Fix rendering of whatsnew entries
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index aceef7a5d6923..6dac64449179b 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -91,16 +91,16 @@ Now using multiple groupings will also pass the unobserved groups to the provide Similarly: - - In previous versions of pandas the method :meth:`.DataFrameGroupBy.sum` would result in ``0`` for unobserved groups, but :meth:`.DataFrameGroupBy.prod`, :meth:`.DataFrameGroupBy.all`, and :meth:`.DataFrameGroupBy.any` would all result in NA values. Now these methods result in ``1``, ``True``, and ``False`` respectively. - - :meth:`.DataFrameGroupBy.groups` did not include unobserved groups and now does. +- In previous versions of pandas the method :meth:`.DataFrameGroupBy.sum` would result in ``0`` for unobserved groups, but :meth:`.DataFrameGroupBy.prod`, :meth:`.DataFrameGroupBy.all`, and :meth:`.DataFrameGroupBy.any` would all result in NA values. Now these methods result in ``1``, ``True``, and ``False`` respectively. +- :meth:`.DataFrameGroupBy.groups` did not include unobserved groups and now does. 
These improvements also fixed certain bugs in groupby: - - :meth:`.DataFrameGroupBy.nunique` would fail when there are multiple groupings, unobserved groups, and ``as_index=False`` (:issue:`52848`) - - :meth:`.DataFrameGroupBy.agg` would fail when there are multiple groupings, unobserved groups, and ``as_index=False`` (:issue:`36698`) - - :meth:`.DataFrameGroupBy.sum` would have incorrect values when there are multiple groupings, unobserved groups, and non-numeric data (:issue:`43891`) - - :meth:`.DataFrameGroupBy.groups` with ``sort=False`` would sort groups; they now occur in the order they are observed (:issue:`56966`) - - :meth:`.DataFrameGroupBy.value_counts` would produce incorrect results when used with some categorical and some non-categorical groupings and ``observed=False`` (:issue:`56016`) +- :meth:`.DataFrameGroupBy.agg` would fail when there are multiple groupings, unobserved groups, and ``as_index=False`` (:issue:`36698`) +- :meth:`.DataFrameGroupBy.groups` with ``sort=False`` would sort groups; they now occur in the order they are observed (:issue:`56966`) +- :meth:`.DataFrameGroupBy.nunique` would fail when there are multiple groupings, unobserved groups, and ``as_index=False`` (:issue:`52848`) +- :meth:`.DataFrameGroupBy.sum` would have incorrect values when there are multiple groupings, unobserved groups, and non-numeric data (:issue:`43891`) +- :meth:`.DataFrameGroupBy.value_counts` would produce incorrect results when used with some categorical and some non-categorical groupings and ``observed=False`` (:issue:`56016`) .. _whatsnew_300.notable_bug_fixes.notable_bug_fix2:
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. cc @rhshadrach those render in a weird box if you have a space in the beginning
https://api.github.com/repos/pandas-dev/pandas/pulls/57871
2024-03-16T23:01:40Z
2024-03-17T00:32:48Z
2024-03-17T00:32:48Z
2024-03-19T20:08:53Z
DEPR: Deprecate remaining copy usages
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index fb33601263c5d..3b9b91945624f 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -173,6 +173,13 @@ will be removed in a future version: - :meth:`DataFrame.astype` / :meth:`Series.astype` - :meth:`DataFrame.reindex` / :meth:`Series.reindex` - :meth:`DataFrame.reindex_like` / :meth:`Series.reindex_like` +- :meth:`DataFrame.set_axis` / :meth:`Series.set_axis` +- :meth:`DataFrame.to_period` / :meth:`Series.to_period` +- :meth:`DataFrame.to_timestamp` / :meth:`Series.to_timestamp` +- :meth:`DataFrame.rename` / :meth:`Series.rename` +- :meth:`DataFrame.transpose` +- :meth:`DataFrame.swaplevel` +- :meth:`DataFrame.merge` / :func:`pd.merge` Copy-on-Write utilizes a lazy copy mechanism that defers copying the data until necessary. Use ``.copy`` to trigger an eager copy. The copy keyword has no effect diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 501901e5b3593..b218dd899c8f8 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -357,7 +357,7 @@ of a string to indicate that the column name from `left` or `right` should be left as-is, with no suffix. At least one of the values must not be None. -copy : bool, default True +copy : bool, default False If False, avoid copy if possible. .. note:: @@ -371,6 +371,8 @@ You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` + + .. deprecated:: 3.0.0 indicator : bool or str, default False If True, adds a column to the output DataFrame called "_merge" with information on the source of each row. 
The column can be given a different @@ -3576,7 +3578,11 @@ def memory_usage(self, index: bool = True, deep: bool = False) -> Series: result = index_memory_usage._append(result) return result - def transpose(self, *args, copy: bool = False) -> DataFrame: + def transpose( + self, + *args, + copy: bool | lib.NoDefault = lib.no_default, + ) -> DataFrame: """ Transpose index and columns. @@ -3607,6 +3613,8 @@ def transpose(self, *args, copy: bool = False) -> DataFrame: You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` + .. deprecated:: 3.0.0 + Returns ------- DataFrame @@ -3687,6 +3695,7 @@ def transpose(self, *args, copy: bool = False) -> DataFrame: 1 object dtype: object """ + self._check_copy_deprecation(copy) nv.validate_transpose(args, {}) # construct the args @@ -5062,9 +5071,9 @@ def set_axis( labels, *, axis: Axis = 0, - copy: bool | None = None, + copy: bool | lib.NoDefault = lib.no_default, ) -> DataFrame: - return super().set_axis(labels, axis=axis) + return super().set_axis(labels, axis=axis, copy=copy) @doc( NDFrame.reindex, @@ -5313,7 +5322,7 @@ def rename( index: Renamer | None = ..., columns: Renamer | None = ..., axis: Axis | None = ..., - copy: bool | None = ..., + copy: bool | lib.NoDefault = lib.no_default, inplace: Literal[True], level: Level = ..., errors: IgnoreRaise = ..., @@ -5327,7 +5336,7 @@ def rename( index: Renamer | None = ..., columns: Renamer | None = ..., axis: Axis | None = ..., - copy: bool | None = ..., + copy: bool | lib.NoDefault = lib.no_default, inplace: Literal[False] = ..., level: Level = ..., errors: IgnoreRaise = ..., @@ -5341,7 +5350,7 @@ def rename( index: Renamer | None = ..., columns: Renamer | None = ..., axis: Axis | None = ..., - copy: bool | None = ..., + copy: bool | lib.NoDefault = lib.no_default, inplace: bool = ..., level: Level = ..., errors: IgnoreRaise = ..., @@ -5354,7 +5363,7 @@ def rename( index: Renamer | None = None, columns: 
Renamer | None = None, axis: Axis | None = None, - copy: bool | None = None, + copy: bool | lib.NoDefault = lib.no_default, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", @@ -5384,7 +5393,7 @@ def rename( axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. - copy : bool, default True + copy : bool, default False Also copy underlying data. .. note:: @@ -5398,6 +5407,8 @@ def rename( You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` + + .. deprecated:: 3.0.0 inplace : bool, default False Whether to modify the DataFrame rather than creating a new one. If True then value of copy is ignored. @@ -5478,6 +5489,7 @@ def rename( 2 2 5 4 3 6 """ + self._check_copy_deprecation(copy) return super()._rename( mapper=mapper, index=index, @@ -10657,10 +10669,12 @@ def merge( right_index: bool = False, sort: bool = False, suffixes: Suffixes = ("_x", "_y"), - copy: bool | None = None, + copy: bool | lib.NoDefault = lib.no_default, indicator: str | bool = False, validate: MergeValidate | None = None, ) -> DataFrame: + self._check_copy_deprecation(copy) + from pandas.core.reshape.merge import merge return merge( @@ -12462,7 +12476,7 @@ def to_timestamp( freq: Frequency | None = None, how: ToTimestampHow = "start", axis: Axis = 0, - copy: bool | None = None, + copy: bool | lib.NoDefault = lib.no_default, ) -> DataFrame: """ Cast to DatetimeIndex of timestamps, at *beginning* of period. @@ -12476,7 +12490,7 @@ def to_timestamp( vs. end. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). - copy : bool, default True + copy : bool, default False If False then underlying input data is not copied. .. 
note:: @@ -12491,6 +12505,8 @@ def to_timestamp( You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` + .. deprecated:: 3.0.0 + Returns ------- DataFrame @@ -12527,6 +12543,7 @@ def to_timestamp( >>> df2.index DatetimeIndex(['2023-01-31', '2024-01-31'], dtype='datetime64[ns]', freq=None) """ + self._check_copy_deprecation(copy) new_obj = self.copy(deep=False) axis_name = self._get_axis_name(axis) @@ -12540,7 +12557,10 @@ def to_timestamp( return new_obj def to_period( - self, freq: Frequency | None = None, axis: Axis = 0, copy: bool | None = None + self, + freq: Frequency | None = None, + axis: Axis = 0, + copy: bool | lib.NoDefault = lib.no_default, ) -> DataFrame: """ Convert DataFrame from DatetimeIndex to PeriodIndex. @@ -12554,7 +12574,7 @@ def to_period( Frequency of the PeriodIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). - copy : bool, default True + copy : bool, default False If False then underlying input data is not copied. .. note:: @@ -12569,6 +12589,8 @@ def to_period( You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` + .. 
deprecated:: 3.0.0 + Returns ------- DataFrame @@ -12596,6 +12618,7 @@ def to_period( >>> idx.to_period("Y") PeriodIndex(['2001', '2002', '2003'], dtype='period[Y-DEC]') """ + self._check_copy_deprecation(copy) new_obj = self.copy(deep=False) axis_name = self._get_axis_name(axis) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e20d23befa6a8..ebcb700e656f6 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -398,7 +398,7 @@ def flags(self) -> Flags: def set_flags( self, *, - copy: bool = False, + copy: bool | lib.NoDefault = lib.no_default, allows_duplicate_labels: bool | None = None, ) -> Self: """ @@ -420,6 +420,8 @@ def set_flags( You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` + + .. deprecated:: 3.0.0 allows_duplicate_labels : bool, optional Whether the returned object allows duplicate labels. @@ -454,6 +456,7 @@ def set_flags( >>> df2.flags.allows_duplicate_labels False """ + self._check_copy_deprecation(copy) df = self.copy(deep=False) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels @@ -679,7 +682,7 @@ def set_axis( labels, *, axis: Axis = 0, - copy: bool | None = None, + copy: bool | lib.NoDefault = lib.no_default, ) -> Self: """ Assign desired index to given axis. @@ -696,7 +699,7 @@ def set_axis( The axis to update. The value 0 identifies the rows. For `Series` this parameter is unused and defaults to 0. - copy : bool, default True + copy : bool, default False Whether to make a copy of the underlying data. .. note:: @@ -711,6 +714,8 @@ def set_axis( You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` + .. deprecated:: 3.0.0 + Returns ------- %(klass)s @@ -720,6 +725,7 @@ def set_axis( -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. 
""" + self._check_copy_deprecation(copy) return self._set_axis_nocheck(labels, axis, inplace=False) @overload @@ -948,7 +954,6 @@ def _rename( index: Renamer | None = ..., columns: Renamer | None = ..., axis: Axis | None = ..., - copy: bool | None = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: str = ..., @@ -962,7 +967,6 @@ def _rename( index: Renamer | None = ..., columns: Renamer | None = ..., axis: Axis | None = ..., - copy: bool | None = ..., inplace: Literal[True], level: Level | None = ..., errors: str = ..., @@ -976,7 +980,6 @@ def _rename( index: Renamer | None = ..., columns: Renamer | None = ..., axis: Axis | None = ..., - copy: bool | None = ..., inplace: bool, level: Level | None = ..., errors: str = ..., @@ -990,7 +993,6 @@ def _rename( index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, - copy: bool | None = None, inplace: bool = False, level: Level | None = None, errors: str = "ignore", @@ -1061,7 +1063,7 @@ def rename_axis( index=..., columns=..., axis: Axis = ..., - copy: bool | None = ..., + copy: bool | lib.NoDefault = lib.no_default, inplace: Literal[False] = ..., ) -> Self: ... @@ -1073,7 +1075,7 @@ def rename_axis( index=..., columns=..., axis: Axis = ..., - copy: bool | None = ..., + copy: bool | lib.NoDefault = lib.no_default, inplace: Literal[True], ) -> None: ... @@ -1085,7 +1087,7 @@ def rename_axis( index=..., columns=..., axis: Axis = ..., - copy: bool | None = ..., + copy: bool | lib.NoDefault = lib.no_default, inplace: bool = ..., ) -> Self | None: ... @@ -1096,7 +1098,7 @@ def rename_axis( index=lib.no_default, columns=lib.no_default, axis: Axis = 0, - copy: bool | None = None, + copy: bool | lib.NoDefault = lib.no_default, inplace: bool = False, ) -> Self | None: """ @@ -1118,7 +1120,7 @@ def rename_axis( apply to that axis' values. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. 
- copy : bool, default None + copy : bool, default False Also copy underlying data. .. note:: @@ -1132,6 +1134,8 @@ def rename_axis( You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` + + .. deprecated:: 3.0.0 inplace : bool, default False Modifies the object directly, instead of creating a new Series or DataFrame. @@ -1219,6 +1223,7 @@ class name cat 4 0 monkey 2 2 """ + self._check_copy_deprecation(copy) axes = {"index": index, "columns": columns} if axis is not None: @@ -6327,7 +6332,7 @@ def astype( return self.copy(deep=False) # GH 19920: retain column metadata after concat - result = concat(results, axis=1, copy=False) + result = concat(results, axis=1) # GH#40810 retain subclass # error: Incompatible types in assignment # (expression has type "Self", variable has type "DataFrame") diff --git a/pandas/core/interchange/dataframe.py b/pandas/core/interchange/dataframe.py index 1abacddfc7e3b..0a116af567e59 100644 --- a/pandas/core/interchange/dataframe.py +++ b/pandas/core/interchange/dataframe.py @@ -33,7 +33,7 @@ def __init__(self, df: DataFrame, allow_copy: bool = True) -> None: Constructor - an instance of this (private) class is returned from `pd.DataFrame.__dataframe__`. 
""" - self._df = df.rename(columns=str, copy=False) + self._df = df.rename(columns=str) self._allow_copy = allow_copy for i, _col in enumerate(self._df.columns): rechunked = maybe_rechunk(self._df.iloc[:, i], allow_copy=allow_copy) diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 35a08e0167924..40af03b45fa44 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -12,10 +12,13 @@ cast, overload, ) +import warnings import numpy as np +from pandas._libs import lib from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_bool from pandas.core.dtypes.concat import concat_compat @@ -75,7 +78,7 @@ def concat( names: list[HashableT] | None = ..., verify_integrity: bool = ..., sort: bool = ..., - copy: bool | None = ..., + copy: bool | lib.NoDefault = ..., ) -> DataFrame: ... @@ -91,7 +94,7 @@ def concat( names: list[HashableT] | None = ..., verify_integrity: bool = ..., sort: bool = ..., - copy: bool | None = ..., + copy: bool | lib.NoDefault = ..., ) -> Series: ... @@ -107,7 +110,7 @@ def concat( names: list[HashableT] | None = ..., verify_integrity: bool = ..., sort: bool = ..., - copy: bool | None = ..., + copy: bool | lib.NoDefault = ..., ) -> DataFrame | Series: ... @@ -123,7 +126,7 @@ def concat( names: list[HashableT] | None = ..., verify_integrity: bool = ..., sort: bool = ..., - copy: bool | None = ..., + copy: bool | lib.NoDefault = ..., ) -> DataFrame: ... @@ -139,7 +142,7 @@ def concat( names: list[HashableT] | None = ..., verify_integrity: bool = ..., sort: bool = ..., - copy: bool | None = ..., + copy: bool | lib.NoDefault = ..., ) -> DataFrame | Series: ... 
@@ -154,7 +157,7 @@ def concat( names: list[HashableT] | None = None, verify_integrity: bool = False, sort: bool = False, - copy: bool | None = None, + copy: bool | lib.NoDefault = lib.no_default, ) -> DataFrame | Series: """ Concatenate pandas objects along a particular axis. @@ -198,9 +201,23 @@ def concat( non-concatentation axis is a DatetimeIndex and join='outer' and the axis is not already aligned. In that case, the non-concatenation axis is always sorted lexicographically. - copy : bool, default True + copy : bool, default False If False, do not copy data unnecessarily. + .. note:: + The `copy` keyword will change behavior in pandas 3.0. + `Copy-on-Write + <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__ + will be enabled by default, which means that all methods with a + `copy` keyword will use a lazy copy mechanism to defer the copy and + ignore the `copy` keyword. The `copy` keyword will be removed in a + future version of pandas. + + You can already get the future behavior and improvements through + enabling copy on write ``pd.options.mode.copy_on_write = True`` + + .. deprecated:: 3.0.0 + Returns ------- object, type of objs @@ -359,6 +376,15 @@ def concat( 0 1 2 1 3 4 """ + if copy is not lib.no_default: + warnings.warn( + "The copy keyword is deprecated and will be removed in a future " + "version. Copy-on-Write is active in pandas since 3.0 which utilizes " + "a lazy copy mechanism that defers copies until necessary. 
Use " + ".copy() to make an eager copy if necessary.", + DeprecationWarning, + stacklevel=find_stack_level(), + ) op = _Concatenator( objs, diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 2cd065d03ff53..dcb638cfee97b 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -145,11 +145,12 @@ def merge( right_index: bool = False, sort: bool = False, suffixes: Suffixes = ("_x", "_y"), - copy: bool | None = None, + copy: bool | lib.NoDefault = lib.no_default, indicator: str | bool = False, validate: str | None = None, ) -> DataFrame: left_df = _validate_operand(left) + left._check_copy_deprecation(copy) right_df = _validate_operand(right) if how == "cross": return _cross_merge( diff --git a/pandas/core/series.py b/pandas/core/series.py index 3adc2d2a44e73..0761dc17ab147 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4093,7 +4093,7 @@ def nsmallest( ), ) def swaplevel( - self, i: Level = -2, j: Level = -1, copy: bool | None = None + self, i: Level = -2, j: Level = -1, copy: bool | lib.NoDefault = lib.no_default ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. 
@@ -4113,6 +4113,7 @@ def swaplevel( {examples} """ + self._check_copy_deprecation(copy) assert isinstance(self.index, MultiIndex) result = self.copy(deep=False) result.index = self.index.swaplevel(i, j) @@ -4611,7 +4612,7 @@ def rename( index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., - copy: bool = ..., + copy: bool | lib.NoDefault = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., @@ -4623,7 +4624,7 @@ def rename( index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., - copy: bool = ..., + copy: bool | lib.NoDefault = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., @@ -4635,7 +4636,7 @@ def rename( index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., - copy: bool = ..., + copy: bool | lib.NoDefault = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., @@ -4646,7 +4647,7 @@ def rename( index: Renamer | Hashable | None = None, *, axis: Axis | None = None, - copy: bool | None = None, + copy: bool | lib.NoDefault = lib.no_default, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", @@ -4671,7 +4672,7 @@ def rename( attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. - copy : bool, default True + copy : bool, default False Also copy underlying data. .. note:: @@ -4685,6 +4686,8 @@ def rename( You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` + + .. deprecated:: 3.0.0 inplace : bool, default False Whether to return a new Series. If True the value of copy is ignored. level : int or level name, default None @@ -4728,6 +4731,7 @@ def rename( 5 3 dtype: int64 """ + self._check_copy_deprecation(copy) if axis is not None: # Make sure we raise if an invalid 'axis' is passed. 
axis = self._get_axis_number(axis) @@ -4777,9 +4781,9 @@ def set_axis( labels, *, axis: Axis = 0, - copy: bool | None = None, + copy: bool | lib.NoDefault = lib.no_default, ) -> Series: - return super().set_axis(labels, axis=axis) + return super().set_axis(labels, axis=axis, copy=copy) # error: Cannot determine type of 'reindex' @doc( @@ -4816,7 +4820,7 @@ def rename_axis( *, index=..., axis: Axis = ..., - copy: bool = ..., + copy: bool | lib.NoDefault = ..., inplace: Literal[True], ) -> None: ... @@ -4827,7 +4831,7 @@ def rename_axis( *, index=..., axis: Axis = ..., - copy: bool = ..., + copy: bool | lib.NoDefault = ..., inplace: Literal[False] = ..., ) -> Self: ... @@ -4838,7 +4842,7 @@ def rename_axis( *, index=..., axis: Axis = ..., - copy: bool = ..., + copy: bool | lib.NoDefault = ..., inplace: bool = ..., ) -> Self | None: ... @@ -4848,7 +4852,7 @@ def rename_axis( *, index=lib.no_default, axis: Axis = 0, - copy: bool = True, + copy: bool | lib.NoDefault = lib.no_default, inplace: bool = False, ) -> Self | None: """ @@ -4867,7 +4871,7 @@ def rename_axis( apply to that axis' values. axis : {0 or 'index'}, default 0 The axis to rename. For `Series` this parameter is unused and defaults to 0. - copy : bool, default None + copy : bool, default False Also copy underlying data. .. note:: @@ -4917,6 +4921,7 @@ def rename_axis( index=index, axis=axis, inplace=inplace, + copy=copy, ) @overload @@ -5640,7 +5645,7 @@ def to_timestamp( self, freq: Frequency | None = None, how: Literal["s", "e", "start", "end"] = "start", - copy: bool | None = None, + copy: bool | lib.NoDefault = lib.no_default, ) -> Series: """ Cast to DatetimeIndex of Timestamps, at *beginning* of period. @@ -5652,7 +5657,7 @@ def to_timestamp( how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end. - copy : bool, default True + copy : bool, default False Whether or not to return a copy. .. 
note:: @@ -5667,6 +5672,8 @@ def to_timestamp( You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` + .. deprecated:: 3.0.0 + Returns ------- Series with DatetimeIndex @@ -5700,6 +5707,7 @@ def to_timestamp( 2025-01-31 3 Freq: YE-JAN, dtype: int64 """ + self._check_copy_deprecation(copy) if not isinstance(self.index, PeriodIndex): raise TypeError(f"unsupported Type {type(self.index).__name__}") @@ -5708,7 +5716,11 @@ def to_timestamp( setattr(new_obj, "index", new_index) return new_obj - def to_period(self, freq: str | None = None, copy: bool | None = None) -> Series: + def to_period( + self, + freq: str | None = None, + copy: bool | lib.NoDefault = lib.no_default, + ) -> Series: """ Convert Series from DatetimeIndex to PeriodIndex. @@ -5716,7 +5728,7 @@ def to_period(self, freq: str | None = None, copy: bool | None = None) -> Series ---------- freq : str, default None Frequency associated with the PeriodIndex. - copy : bool, default True + copy : bool, default False Whether or not to return a copy. .. note:: @@ -5731,6 +5743,8 @@ def to_period(self, freq: str | None = None, copy: bool | None = None) -> Series You can already get the future behavior and improvements through enabling copy on write ``pd.options.mode.copy_on_write = True`` + .. 
deprecated:: 3.0.0 + Returns ------- Series @@ -5752,6 +5766,7 @@ def to_period(self, freq: str | None = None, copy: bool | None = None) -> Series >>> s.index PeriodIndex(['2023', '2024', '2025'], dtype='period[Y-DEC]') """ + self._check_copy_deprecation(copy) if not isinstance(self.index, DatetimeIndex): raise TypeError(f"unsupported Type {type(self.index).__name__}") diff --git a/pandas/tests/copy_view/test_copy_deprecation.py b/pandas/tests/copy_view/test_copy_deprecation.py index ca57c02112131..8ee37213b92ab 100644 --- a/pandas/tests/copy_view/test_copy_deprecation.py +++ b/pandas/tests/copy_view/test_copy_deprecation.py @@ -1,6 +1,10 @@ import pytest import pandas as pd +from pandas import ( + concat, + merge, +) import pandas._testing as tm @@ -13,20 +17,33 @@ ("infer_objects", {}), ("astype", {"dtype": "float64"}), ("reindex", {"index": [2, 0, 1]}), + ("transpose", {}), + ("set_axis", {"labels": [1, 2, 3]}), + ("rename", {"index": {1: 2}}), + ("set_flags", {}), + ("to_period", {}), + ("to_timestamp", {}), + ("swaplevel", {"i": 0, "j": 1}), ], ) def test_copy_deprecation(meth, kwargs): - df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": 1}) - if meth in ("tz_convert", "tz_localize"): - tz = None if meth == "tz_localize" else "US/Eastern" + if meth in ("tz_convert", "tz_localize", "to_period"): + tz = None if meth in ("tz_localize", "to_period") else "US/Eastern" df.index = pd.date_range("2020-01-01", freq="D", periods=len(df), tz=tz) + elif meth == "to_timestamp": + df.index = pd.period_range("2020-01-01", freq="D", periods=len(df)) + elif meth == "swaplevel": + df = df.set_index(["b", "c"]) - with tm.assert_produces_warning(DeprecationWarning, match="copy"): - getattr(df, meth)(copy=False, **kwargs) + if meth != "swaplevel": + with tm.assert_produces_warning(DeprecationWarning, match="copy"): + getattr(df, meth)(copy=False, **kwargs) - with tm.assert_produces_warning(DeprecationWarning, 
match="copy"): - getattr(df.a, meth)(copy=False, **kwargs) + if meth != "transpose": + with tm.assert_produces_warning(DeprecationWarning, match="copy"): + getattr(df.a, meth)(copy=False, **kwargs) def test_copy_deprecation_reindex_like_align(): @@ -51,3 +68,22 @@ def test_copy_deprecation_reindex_like_align(): DeprecationWarning, match="copy", check_stacklevel=False ): df.a.align(df.a, copy=False) + + +def test_copy_deprecation_merge_concat(): + df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + with tm.assert_produces_warning( + DeprecationWarning, match="copy", check_stacklevel=False + ): + df.merge(df, copy=False) + + with tm.assert_produces_warning( + DeprecationWarning, match="copy", check_stacklevel=False + ): + merge(df, df, copy=False) + + with tm.assert_produces_warning( + DeprecationWarning, match="copy", check_stacklevel=False + ): + concat([df, df], copy=False) diff --git a/pandas/tests/copy_view/test_functions.py b/pandas/tests/copy_view/test_functions.py index eeb19103f7bd5..196d908a44a46 100644 --- a/pandas/tests/copy_view/test_functions.py +++ b/pandas/tests/copy_view/test_functions.py @@ -139,12 +139,11 @@ def test_concat_mixed_series_frame(): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("copy", [True, None, False]) -def test_concat_copy_keyword(copy): +def test_concat_copy_keyword(): df = DataFrame({"a": [1, 2]}) df2 = DataFrame({"b": [1.5, 2.5]}) - result = concat([df, df2], axis=1, copy=copy) + result = concat([df, df2], axis=1) assert np.shares_memory(get_array(df, "a"), get_array(result, "a")) assert np.shares_memory(get_array(df2, "b"), get_array(result, "b")) @@ -234,12 +233,11 @@ def test_merge_on_key_enlarging_one(func, how): tm.assert_frame_equal(df2, df2_orig) -@pytest.mark.parametrize("copy", [True, None, False]) -def test_merge_copy_keyword(copy): +def test_merge_copy_keyword(): df = DataFrame({"a": [1, 2]}) df2 = DataFrame({"b": [3, 4.5]}) - result = df.merge(df2, copy=copy, left_index=True, 
right_index=True) + result = df.merge(df2, left_index=True, right_index=True) assert np.shares_memory(get_array(df, "a"), get_array(result, "a")) assert np.shares_memory(get_array(df2, "b"), get_array(result, "b")) diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py index 8bf0e81e74e25..3712a74fe54ed 100644 --- a/pandas/tests/copy_view/test_methods.py +++ b/pandas/tests/copy_view/test_methods.py @@ -176,13 +176,6 @@ def test_methods_series_copy_keyword(request, method, copy): assert np.shares_memory(get_array(ser2), get_array(ser)) -@pytest.mark.parametrize("copy", [True, None, False]) -def test_transpose_copy_keyword(copy): - df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) - result = df.transpose(copy=copy) - assert np.shares_memory(get_array(df, "a"), get_array(result, 0)) - - # ----------------------------------------------------------------------------- # DataFrame methods returning new DataFrame using shallow copy @@ -1415,11 +1408,10 @@ def test_inplace_arithmetic_series_with_reference(): tm.assert_series_equal(ser_orig, view) -@pytest.mark.parametrize("copy", [True, False]) -def test_transpose(copy): +def test_transpose(): df = DataFrame({"a": [1, 2, 3], "b": 1}) df_orig = df.copy() - result = df.transpose(copy=copy) + result = df.transpose() assert np.shares_memory(get_array(df, "a"), get_array(result, 0)) result.iloc[0, 0] = 100 diff --git a/pandas/tests/dtypes/test_concat.py b/pandas/tests/dtypes/test_concat.py index 4f7ae6fa2a0a0..1652c9254061b 100644 --- a/pandas/tests/dtypes/test_concat.py +++ b/pandas/tests/dtypes/test_concat.py @@ -20,14 +20,13 @@ def test_concat_mismatched_categoricals_with_empty(): tm.assert_categorical_equal(result, expected) -@pytest.mark.parametrize("copy", [True, False]) -def test_concat_single_dataframe_tz_aware(copy): +def test_concat_single_dataframe_tz_aware(): # https://github.com/pandas-dev/pandas/issues/25257 df = pd.DataFrame( {"timestamp": [pd.Timestamp("2020-04-08 
09:00:00.709949+0000", tz="UTC")]} ) expected = df.copy() - result = pd.concat([df], copy=copy) + result = pd.concat([df]) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py index 4550e3b055cfe..489cd15644d04 100644 --- a/pandas/tests/extension/base/reshaping.py +++ b/pandas/tests/extension/base/reshaping.py @@ -106,7 +106,7 @@ def test_concat_extension_arrays_copy_false(self, data, na_value): "B": data[3:7], } ) - result = pd.concat([df1, df2], axis=1, copy=False) + result = pd.concat([df1, df2], axis=1) tm.assert_frame_equal(result, expected) def test_concat_with_reindex(self, data): diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py index 996fc30552bc4..6153a168476d4 100644 --- a/pandas/tests/frame/methods/test_rename.py +++ b/pandas/tests/frame/methods/test_rename.py @@ -165,7 +165,7 @@ def test_rename_multiindex(self): tm.assert_index_equal(renamed.index, new_index) def test_rename_nocopy(self, float_frame): - renamed = float_frame.rename(columns={"C": "foo"}, copy=False) + renamed = float_frame.rename(columns={"C": "foo"}) assert np.shares_memory(renamed["foo"]._values, float_frame["C"]._values) diff --git a/pandas/tests/frame/methods/test_set_axis.py b/pandas/tests/frame/methods/test_set_axis.py index 8c42498b45621..1967941bca9f0 100644 --- a/pandas/tests/frame/methods/test_set_axis.py +++ b/pandas/tests/frame/methods/test_set_axis.py @@ -29,10 +29,7 @@ def test_set_axis_copy(self, obj): expected = obj.copy() expected.index = new_index - result = obj.set_axis(new_index, axis=0, copy=True) - tm.assert_equal(expected, result) - assert result is not obj - result = obj.set_axis(new_index, axis=0, copy=False) + result = obj.set_axis(new_index, axis=0) tm.assert_equal(expected, result) assert result is not obj # check we did NOT make a copy @@ -44,7 +41,6 @@ def test_set_axis_copy(self, obj): for i in range(obj.shape[1]) ) - # copy 
defaults to True result = obj.set_axis(new_index, axis=0) tm.assert_equal(expected, result) assert result is not obj @@ -57,7 +53,7 @@ def test_set_axis_copy(self, obj): for i in range(obj.shape[1]) ) - res = obj.set_axis(new_index, copy=False) + res = obj.set_axis(new_index) tm.assert_equal(expected, res) # check we did NOT make a copy if res.ndim == 1: diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 680800d7f5e4c..48f51dfa981ca 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -356,9 +356,7 @@ def test_set_flags( assert obj.iloc[key] == 1 # Now we do copy. - result = obj.set_flags( - copy=True, allows_duplicate_labels=allows_duplicate_labels - ) + result = obj.set_flags(allows_duplicate_labels=allows_duplicate_labels) result.iloc[key] = 10 assert obj.iloc[key] == 1 diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py index cf11bf237f615..b986aa8182219 100644 --- a/pandas/tests/reshape/concat/test_concat.py +++ b/pandas/tests/reshape/concat/test_concat.py @@ -50,12 +50,12 @@ def test_concat_copy(self): df3 = DataFrame({5: "foo"}, index=range(4)) # These are actual copies. - result = concat([df, df2, df3], axis=1, copy=True) + result = concat([df, df2, df3], axis=1) for arr in result._mgr.arrays: assert arr.base is not None # These are the same. - result = concat([df, df2, df3], axis=1, copy=False) + result = concat([df, df2, df3], axis=1) for arr in result._mgr.arrays: if arr.dtype.kind == "f": @@ -67,7 +67,7 @@ def test_concat_copy(self): # Float block was consolidated. 
df4 = DataFrame(np.random.default_rng(2).standard_normal((4, 1))) - result = concat([df, df2, df3, df4], axis=1, copy=False) + result = concat([df, df2, df3, df4], axis=1) for arr in result._mgr.arrays: if arr.dtype.kind == "f": # this is a view on some array in either df or df4 diff --git a/pandas/tests/reshape/concat/test_index.py b/pandas/tests/reshape/concat/test_index.py index ca544c5d42a25..68d77b79a59e7 100644 --- a/pandas/tests/reshape/concat/test_index.py +++ b/pandas/tests/reshape/concat/test_index.py @@ -101,7 +101,7 @@ def test_concat_rename_index(self): def test_concat_copy_index_series(self, axis): # GH 29879 ser = Series([1, 2]) - comb = concat([ser, ser], axis=axis, copy=True) + comb = concat([ser, ser], axis=axis) if axis in [0, "index"]: assert comb.index is not ser.index else: @@ -110,7 +110,7 @@ def test_concat_copy_index_series(self, axis): def test_concat_copy_index_frame(self, axis): # GH 29879 df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) - comb = concat([df, df], axis=axis, copy=True) + comb = concat([df, df], axis=axis) if axis in [0, "index"]: assert not comb.index.is_(df.index) assert comb.columns.is_(df.columns) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index f063f333ac889..1cd52ab1ae8b4 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -260,7 +260,7 @@ def test_merge_copy(self): left = DataFrame({"a": 0, "b": 1}, index=range(10)) right = DataFrame({"c": "foo", "d": "bar"}, index=range(10)) - merged = merge(left, right, left_index=True, right_index=True, copy=True) + merged = merge(left, right, left_index=True, right_index=True) merged["a"] = 6 assert (left["a"] == 0).all() @@ -272,7 +272,7 @@ def test_merge_nocopy(self, using_infer_string): left = DataFrame({"a": 0, "b": 1}, index=range(10)) right = DataFrame({"c": "foo", "d": "bar"}, index=range(10)) - merged = merge(left, right, left_index=True, right_index=True, 
copy=False) + merged = merge(left, right, left_index=True, right_index=True) assert np.shares_memory(merged["a"]._values, left["a"]._values) if not using_infer_string: diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py index c67298b777f6d..1da98b3a273be 100644 --- a/pandas/tests/series/methods/test_rename.py +++ b/pandas/tests/series/methods/test_rename.py @@ -173,7 +173,7 @@ def test_rename_copy_false(self): # GH 46889 ser = Series(["foo", "bar"]) ser_orig = ser.copy() - shallow_copy = ser.rename({1: 9}, copy=False) + shallow_copy = ser.rename({1: 9}) ser[0] = "foobar" assert ser_orig[0] == shallow_copy[0] assert ser_orig[1] == shallow_copy[9]
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/57870
2024-03-16T22:56:33Z
2024-03-26T20:25:50Z
2024-03-26T20:25:50Z
2024-03-26T20:25:58Z
CLN: enforce deprecation of `NDFrame.interpolate` with `ffill/bfill/pad/backfill` methods
diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst index ef561d50066d1..f60aa6e4f26b1 100644 --- a/doc/source/whatsnew/v3.0.0.rst +++ b/doc/source/whatsnew/v3.0.0.rst @@ -211,6 +211,7 @@ Removal of prior version deprecations/changes - Enforced deprecation of strings ``T``, ``L``, ``U``, and ``N`` denoting frequencies in :class:`Minute`, :class:`Second`, :class:`Milli`, :class:`Micro`, :class:`Nano` (:issue:`57627`) - Enforced deprecation of strings ``T``, ``L``, ``U``, and ``N`` denoting units in :class:`Timedelta` (:issue:`57627`) - Enforced deprecation of the behavior of :func:`concat` when ``len(keys) != len(objs)`` would truncate to the shorter of the two. Now this raises a ``ValueError`` (:issue:`43485`) +- Enforced deprecation of values "pad", "ffill", "bfill", and "backfill" for :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` (:issue:`57869`) - Enforced silent-downcasting deprecation for :ref:`all relevant methods <whatsnew_220.silent_downcasting>` (:issue:`54710`) - In :meth:`DataFrame.stack`, the default value of ``future_stack`` is now ``True``; specifying ``False`` will raise a ``FutureWarning`` (:issue:`55448`) - Iterating over a :class:`.DataFrameGroupBy` or :class:`.SeriesGroupBy` will return tuples of length 1 for the groups when grouping by ``level`` a list of length 1 (:issue:`50064`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c9e6ffe1d7dc6..c0eda7f022d8f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7624,7 +7624,6 @@ def interpolate( * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. - * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`, whereas 'spline' is passed to @@ -7648,23 +7647,9 @@ def interpolate( 0. 
inplace : bool, default False Update the data in place if possible. - limit_direction : {{'forward', 'backward', 'both'}}, Optional + limit_direction : {{'forward', 'backward', 'both'}}, optional, default 'forward' Consecutive NaNs will be filled in this direction. - If limit is specified: - * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. - * If 'method' is 'backfill' or 'bfill', 'limit_direction' must be - 'backwards'. - - If 'limit' is not specified: - * If 'method' is 'backfill' or 'bfill', the default is 'backward' - * else the default is 'forward' - - raises ValueError if `limit_direction` is 'forward' or 'both' and - method is 'backfill' or 'bfill'. - raises ValueError if `limit_direction` is 'backward' or 'both' and - method is 'pad' or 'ffill'. - limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. @@ -7797,30 +7782,11 @@ def interpolate( if not isinstance(method, str): raise ValueError("'method' should be a string, not None.") - fillna_methods = ["ffill", "bfill", "pad", "backfill"] - if method.lower() in fillna_methods: - # GH#53581 - warnings.warn( - f"{type(self).__name__}.interpolate with method={method} is " - "deprecated and will raise in a future version. " - "Use obj.ffill() or obj.bfill() instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - obj, should_transpose = self, False - else: - obj, should_transpose = (self.T, True) if axis == 1 else (self, False) - # GH#53631 - if np.any(obj.dtypes == object): - raise TypeError( - f"{type(self).__name__} cannot interpolate with object dtype." 
- ) - - if method in fillna_methods and "fill_value" in kwargs: - raise ValueError( - "'fill_value' is not a valid keyword for " - f"{type(self).__name__}.interpolate with method from " - f"{fillna_methods}" + obj, should_transpose = (self.T, True) if axis == 1 else (self, False) + # GH#53631 + if np.any(obj.dtypes == object): + raise TypeError( + f"{type(self).__name__} cannot interpolate with object dtype." ) if isinstance(obj.index, MultiIndex) and method != "linear": @@ -7830,34 +7796,16 @@ def interpolate( limit_direction = missing.infer_limit_direction(limit_direction, method) - if method.lower() in fillna_methods: - # TODO(3.0): remove this case - # TODO: warn/raise on limit_direction or kwargs which are ignored? - # as of 2023-06-26 no tests get here with either - if not self._mgr.is_single_block and axis == 1: - # GH#53898 - if inplace: - raise NotImplementedError() - obj, axis, should_transpose = self.T, 1 - axis, True - - new_data = obj._mgr.pad_or_backfill( - method=method, - axis=self._get_block_manager_axis(axis), - limit=limit, - limit_area=limit_area, - inplace=inplace, - ) - else: - index = missing.get_interp_index(method, obj.index) - new_data = obj._mgr.interpolate( - method=method, - index=index, - limit=limit, - limit_direction=limit_direction, - limit_area=limit_area, - inplace=inplace, - **kwargs, - ) + index = missing.get_interp_index(method, obj.index) + new_data = obj._mgr.interpolate( + method=method, + index=index, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + inplace=inplace, + **kwargs, + ) result = self._constructor_from_mgr(new_data, axes=new_data.axes) if should_transpose: diff --git a/pandas/core/missing.py b/pandas/core/missing.py index de26ad14a7b7a..b3e152e36a304 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -322,13 +322,17 @@ def get_interp_index(method, index: Index) -> Index: or isinstance(index.dtype, DatetimeTZDtype) or lib.is_np_dtype(index.dtype, "mM") ) - if method not 
in methods and not is_numeric_or_datetime: - raise ValueError( - "Index column must be numeric or datetime type when " - f"using {method} method other than linear. " - "Try setting a numeric or datetime index column before " - "interpolating." - ) + valid = NP_METHODS + SP_METHODS + if method in valid: + if method not in methods and not is_numeric_or_datetime: + raise ValueError( + "Index column must be numeric or datetime type when " + f"using {method} method other than linear. " + "Try setting a numeric or datetime index column before " + "interpolating." + ) + else: + raise ValueError(f"Can not interpolate with method={method}.") if isna(index).any(): raise NotImplementedError( @@ -611,7 +615,9 @@ def _interpolate_scipy_wrapper( y = y.copy() if not new_x.flags.writeable: new_x = new_x.copy() - terp = alt_methods[method] + terp = alt_methods.get(method, None) + if terp is None: + raise ValueError(f"Can not interpolate with method={method}.") new_y = terp(x, y, new_x, **kwargs) return new_y diff --git a/pandas/tests/copy_view/test_interp_fillna.py b/pandas/tests/copy_view/test_interp_fillna.py index 8fe58e59b9cfd..abd87162ec32e 100644 --- a/pandas/tests/copy_view/test_interp_fillna.py +++ b/pandas/tests/copy_view/test_interp_fillna.py @@ -19,19 +19,18 @@ def test_interpolate_no_op(method): df = DataFrame({"a": [1, 2]}) df_orig = df.copy() - warn = None if method == "pad": - warn = FutureWarning - msg = "DataFrame.interpolate with method=pad is deprecated" - with tm.assert_produces_warning(warn, match=msg): + msg = f"Can not interpolate with method={method}" + with pytest.raises(ValueError, match=msg): + df.interpolate(method=method) + else: result = df.interpolate(method=method) + assert np.shares_memory(get_array(result, "a"), get_array(df, "a")) - assert np.shares_memory(get_array(result, "a"), get_array(df, "a")) + result.iloc[0, 0] = 100 - result.iloc[0, 0] = 100 - - assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) - 
tm.assert_frame_equal(df, df_orig) + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) @pytest.mark.parametrize("func", ["ffill", "bfill"]) @@ -122,9 +121,6 @@ def test_interpolate_cannot_with_object_dtype(): def test_interpolate_object_convert_no_op(): df = DataFrame({"a": ["a", "b", "c"], "b": 1}) arr_a = get_array(df, "a") - msg = "DataFrame.interpolate with method=pad is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - df.interpolate(method="pad", inplace=True) # Now CoW makes a copy, it should not! assert df._mgr._has_no_reference(0) @@ -134,8 +130,8 @@ def test_interpolate_object_convert_no_op(): def test_interpolate_object_convert_copies(): df = DataFrame({"a": [1, np.nan, 2.5], "b": 1}) arr_a = get_array(df, "a") - msg = "DataFrame.interpolate with method=pad is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): + msg = "Can not interpolate with method=pad" + with pytest.raises(ValueError, match=msg): df.interpolate(method="pad", inplace=True, downcast="infer") assert df._mgr._has_no_reference(0) @@ -147,12 +143,13 @@ def test_interpolate_downcast_reference_triggers_copy(): df_orig = df.copy() arr_a = get_array(df, "a") view = df[:] - msg = "DataFrame.interpolate with method=pad is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): + + msg = "Can not interpolate with method=pad" + with pytest.raises(ValueError, match=msg): df.interpolate(method="pad", inplace=True, downcast="infer") + assert df._mgr._has_no_reference(0) + assert not np.shares_memory(arr_a, get_array(df, "a")) - assert df._mgr._has_no_reference(0) - assert not np.shares_memory(arr_a, get_array(df, "a")) tm.assert_frame_equal(df_orig, view) diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py index 2ba3bbd3109a2..0a9d059736e6f 100644 --- a/pandas/tests/frame/methods/test_interpolate.py +++ 
b/pandas/tests/frame/methods/test_interpolate.py @@ -129,13 +129,7 @@ def test_interp_bad_method(self): "C": [1, 2, 3, 5], } ) - msg = ( - r"method must be one of \['linear', 'time', 'index', 'values', " - r"'nearest', 'zero', 'slinear', 'quadratic', 'cubic', " - r"'barycentric', 'krogh', 'spline', 'polynomial', " - r"'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima', " - r"'cubicspline'\]. Got 'not_a_method' instead." - ) + msg = "Can not interpolate with method=not_a_method" with pytest.raises(ValueError, match=msg): df.interpolate(method="not_a_method") @@ -398,12 +392,9 @@ def test_interp_fillna_methods(self, axis, multiblock, method): df["D"] = np.nan df["E"] = 1.0 - method2 = method if method != "pad" else "ffill" - expected = getattr(df, method2)(axis=axis) - msg = f"DataFrame.interpolate with method={method} is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = df.interpolate(method=method, axis=axis) - tm.assert_frame_equal(result, expected) + msg = f"Can not interpolate with method={method}" + with pytest.raises(ValueError, match=msg): + df.interpolate(method=method, axis=axis) def test_interpolate_empty_df(self): # GH#53199 diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py index e4726f3ec6b32..c5df1fd498938 100644 --- a/pandas/tests/series/methods/test_interpolate.py +++ b/pandas/tests/series/methods/test_interpolate.py @@ -344,7 +344,7 @@ def test_interpolate_invalid_float_limit(self, nontemporal_method): def test_interp_invalid_method(self, invalid_method): s = Series([1, 3, np.nan, 12, np.nan, 25]) - msg = f"method must be one of.* Got '{invalid_method}' instead" + msg = "Can not interpolate with method=nonexistent_method" if invalid_method is None: msg = "'method' should be a string, not None" with pytest.raises(ValueError, match=msg): @@ -355,16 +355,6 @@ def test_interp_invalid_method(self, invalid_method): with pytest.raises(ValueError, 
match=msg): s.interpolate(method=invalid_method, limit=-1) - def test_interp_invalid_method_and_value(self): - # GH#36624 - ser = Series([1, 3, np.nan, 12, np.nan, 25]) - - msg = "'fill_value' is not a valid keyword for Series.interpolate" - msg2 = "Series.interpolate with method=pad" - with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=msg2): - ser.interpolate(fill_value=3, method="pad") - def test_interp_limit_forward(self): s = Series([1, 3, np.nan, np.nan, np.nan, 11]) @@ -455,107 +445,70 @@ def test_interp_limit_area(self): s.interpolate(method="linear", limit_area="abc") @pytest.mark.parametrize( - "method, limit_direction, expected", - [ - ("pad", "backward", "forward"), - ("ffill", "backward", "forward"), - ("backfill", "forward", "backward"), - ("bfill", "forward", "backward"), - ("pad", "both", "forward"), - ("ffill", "both", "forward"), - ("backfill", "both", "backward"), - ("bfill", "both", "backward"), - ], - ) - def test_interp_limit_direction_raises(self, method, limit_direction, expected): - # https://github.com/pandas-dev/pandas/pull/34746 - s = Series([1, 2, 3]) - - msg = f"`limit_direction` must be '{expected}' for method `{method}`" - msg2 = "Series.interpolate with method=" - with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=msg2): - s.interpolate(method=method, limit_direction=limit_direction) - - @pytest.mark.parametrize( - "data, expected_data, kwargs", + "data, kwargs", ( ( [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], - [np.nan, np.nan, 3.0, 3.0, 3.0, 3.0, 7.0, np.nan, np.nan], {"method": "pad", "limit_area": "inside"}, ), ( [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], - [np.nan, np.nan, 3.0, 3.0, np.nan, np.nan, 7.0, np.nan, np.nan], {"method": "pad", "limit_area": "inside", "limit": 1}, ), ( [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], - [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 
7.0, 7.0], {"method": "pad", "limit_area": "outside"}, ), ( [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], - [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan], {"method": "pad", "limit_area": "outside", "limit": 1}, ), ( - [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], {"method": "pad", "limit_area": "outside", "limit": 1}, ), ( - range(5), range(5), {"method": "pad", "limit_area": "outside", "limit": 1}, ), ), ) - def test_interp_limit_area_with_pad(self, data, expected_data, kwargs): + def test_interp_limit_area_with_pad(self, data, kwargs): # GH26796 s = Series(data) - expected = Series(expected_data) - msg = "Series.interpolate with method=pad" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = s.interpolate(**kwargs) - tm.assert_series_equal(result, expected) + msg = "Can not interpolate with method=pad" + with pytest.raises(ValueError, match=msg): + s.interpolate(**kwargs) @pytest.mark.parametrize( - "data, expected_data, kwargs", + "data, kwargs", ( ( [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], - [np.nan, np.nan, 3.0, 7.0, 7.0, 7.0, 7.0, np.nan, np.nan], {"method": "bfill", "limit_area": "inside"}, ), ( [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], - [np.nan, np.nan, 3.0, np.nan, np.nan, 7.0, 7.0, np.nan, np.nan], {"method": "bfill", "limit_area": "inside", "limit": 1}, ), ( [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], - [3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan], {"method": "bfill", "limit_area": "outside"}, ), ( [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan], - [np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan], {"method": "bfill", "limit_area": "outside", "limit": 1}, ), ), ) - def test_interp_limit_area_with_backfill(self, data, expected_data, kwargs): + def test_interp_limit_area_with_backfill(self, data, kwargs): # GH26796 - s = 
Series(data) - expected = Series(expected_data) - msg = "Series.interpolate with method=bfill" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = s.interpolate(**kwargs) - tm.assert_series_equal(result, expected) + + msg = "Can not interpolate with method=bfill" + with pytest.raises(ValueError, match=msg): + s.interpolate(**kwargs) def test_interp_limit_direction(self): # These tests are for issue #9218 -- fill NaNs in both directions. @@ -650,20 +603,18 @@ def test_interp_datetime64(self, method, tz_naive_fixture): df = Series( [1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture) ) - warn = None if method == "nearest" else FutureWarning - msg = "Series.interpolate with method=pad is deprecated" - with tm.assert_produces_warning(warn, match=msg): - result = df.interpolate(method=method) - if warn is not None: - # check the "use ffill instead" is equivalent - alt = df.ffill() - tm.assert_series_equal(result, alt) - expected = Series( - [1.0, 1.0, 3.0], - index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture), - ) - tm.assert_series_equal(result, expected) + if method == "nearest": + result = df.interpolate(method=method) + expected = Series( + [1.0, 1.0, 3.0], + index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture), + ) + tm.assert_series_equal(result, expected) + else: + msg = "Can not interpolate with method=pad" + with pytest.raises(ValueError, match=msg): + df.interpolate(method=method) def test_interp_pad_datetime64tz_values(self): # GH#27628 missing.interpolate_2d should handle datetimetz values @@ -671,16 +622,9 @@ def test_interp_pad_datetime64tz_values(self): ser = Series(dti) ser[1] = pd.NaT - msg = "Series.interpolate with method=pad is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = ser.interpolate(method="pad") - # check the "use ffill instead" is equivalent - alt = ser.ffill() - tm.assert_series_equal(result, alt) - - expected = Series(dti) - expected[1] = 
expected[0] - tm.assert_series_equal(result, expected) + msg = "Can not interpolate with method=pad" + with pytest.raises(ValueError, match=msg): + ser.interpolate(method="pad") def test_interp_limit_no_nans(self): # GH 7173
xref #53607 enforced deprecation of `NDFrame.interpolate` with `"ffill” / "bfill” / “pad” / “backfill"` methods
https://api.github.com/repos/pandas-dev/pandas/pulls/57869
2024-03-16T22:41:14Z
2024-03-22T20:30:20Z
2024-03-22T20:30:20Z
2024-03-22T22:00:26Z