Columns (name: type, length range):
title: string, 1 to 185 chars
diff: string, 0 to 32.2M chars
body: string, 0 to 123k chars
url: string, 57 to 58 chars
created_at, closed_at, merged_at, updated_at: string, 20 chars each
Backport PR #55365 on branch 2.1.x (BUG: Index.insert raising when inserting None into new string dtype)
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 090df1489e493..9c81956d14d77 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -22,6 +22,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ - Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) +- Fixed bug in :meth:`Index.insert` raising when inserting ``None`` into :class:`Index` with ``dtype="string[pyarrow_numpy]"`` (:issue:`55365`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 0b8d3d6b13f60..7e200dc8af374 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -579,3 +579,8 @@ def _reduce( ) else: return super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs) + + def insert(self, loc: int, item) -> ArrowStringArrayNumpySemantics: + if item is np.nan: + item = libmissing.NA + return super().insert(loc, item) # type: ignore[return-value] diff --git a/pandas/tests/indexes/base_class/test_reshape.py b/pandas/tests/indexes/base_class/test_reshape.py index 5ecb2c753644d..6586f5f9de480 100644 --- a/pandas/tests/indexes/base_class/test_reshape.py +++ b/pandas/tests/indexes/base_class/test_reshape.py @@ -54,6 +54,14 @@ def test_insert_datetime_into_object(self, loc, val): tm.assert_index_equal(result, expected) assert type(expected[2]) is type(val) + def test_insert_none_into_string_numpy(self): + # GH#55365 + pytest.importorskip("pyarrow") + index = Index(["a", "b", "c"], dtype="string[pyarrow_numpy]") + result = index.insert(-1, None) + expected = Index(["a", "b", None, "c"], dtype="string[pyarrow_numpy]") + tm.assert_index_equal(result, expected) + @pytest.mark.parametrize( "pos,expected", [
#55365
https://api.github.com/repos/pandas-dev/pandas/pulls/55383
2023-10-03T20:09:19Z
2023-10-03T21:50:55Z
2023-10-03T21:50:55Z
2023-10-03T21:50:56Z
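Below, a minimal sketch of the fixed insert behavior from the row above, assuming pandas >= 2.1.2 with pyarrow installed (the dtype name is taken from the patch itself):

```python
import pandas as pd

# Requires pyarrow; "string[pyarrow_numpy]" is the dtype exercised by the patch.
idx = pd.Index(["a", "b", "c"], dtype="string[pyarrow_numpy]")

# GH#55365: this raised before the fix; the patch normalizes np.nan to the
# array's NA sentinel before delegating to the parent insert.
result = idx.insert(-1, None)
print(result)  # equals Index(["a", "b", None, "c"], dtype="string[pyarrow_numpy]") per the test
```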
Backport PR #55347 on branch 2.1.x (BUG: interpolate raising wrong error for ea)
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 60b628c5d658c..93c3c5114c282 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -23,8 +23,10 @@ Bug fixes ~~~~~~~~~ - Fixed bug in :meth:`Categorical.equals` if other has arrow backed string dtype (:issue:`55364`) - Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) +- Fixed bug in :meth:`DataFrame.interpolate` raising incorrect error message (:issue:`55347`) - Fixed bug in :meth:`Index.insert` raising when inserting ``None`` into :class:`Index` with ``dtype="string[pyarrow_numpy]"`` (:issue:`55365`) - Silence ``Period[B]`` warnings introduced by :issue:`53446` during normal plotting activity (:issue:`55138`) +- .. --------------------------------------------------------------------------- .. _whatsnew_212.other: diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index a21253db59323..bfd6ae361e1e8 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -889,7 +889,6 @@ def interpolate( limit, limit_direction, limit_area, - fill_value, copy: bool, **kwargs, ) -> Self: diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py index 291a79815a81c..67aa07dd83764 100644 --- a/pandas/tests/frame/methods/test_interpolate.py +++ b/pandas/tests/frame/methods/test_interpolate.py @@ -497,3 +497,9 @@ def test_interpolate_empty_df(self): result = df.interpolate(inplace=True) assert result is None tm.assert_frame_equal(df, expected) + + def test_interpolate_ea_raise(self): + # GH#55347 + df = DataFrame({"a": [1, None, 2]}, dtype="Int64") + with pytest.raises(NotImplementedError, match="does not implement"): + df.interpolate()
#55347
https://api.github.com/repos/pandas-dev/pandas/pulls/55382
2023-10-03T20:08:52Z
2023-10-04T17:06:32Z
2023-10-04T17:06:32Z
2023-10-04T17:13:36Z
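A short sketch of the behavior the backported test above pins down; the masked ``Int64`` dtype does not implement interpolation, so the call should now fail with a clear message (assumes pandas 2.1.2 semantics):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, None, 2]}, dtype="Int64")

# GH#55347: removing the stray `fill_value` parameter from the abstract
# ExtensionArray.interpolate signature lets this fail with a clear
# NotImplementedError whose message matches "does not implement".
try:
    df.interpolate()
except NotImplementedError as exc:
    print(exc)
```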
Backport PR #55364 on branch 2.1.x (BUG: eq not implemented for categorical and arrow backed strings)
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 090df1489e493..b6cacecfdc5f8 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -21,6 +21,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ +- Fixed bug in :meth:`Categorical.equals` if other has arrow backed string dtype (:issue:`55364`) - Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) - diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 190fd8fd54e02..2e3ad8bc13091 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -32,6 +32,7 @@ from pandas.core.dtypes.cast import infer_dtype_from_scalar from pandas.core.dtypes.common import ( + CategoricalDtype, is_array_like, is_bool_dtype, is_integer, @@ -628,7 +629,9 @@ def __setstate__(self, state) -> None: def _cmp_method(self, other, op): pc_func = ARROW_CMP_FUNCS[op.__name__] - if isinstance(other, (ArrowExtensionArray, np.ndarray, list, BaseMaskedArray)): + if isinstance( + other, (ArrowExtensionArray, np.ndarray, list, BaseMaskedArray) + ) or isinstance(getattr(other, "dtype", None), CategoricalDtype): result = pc_func(self._pa_array, self._box_pa(other)) elif is_scalar(other): try: diff --git a/pandas/tests/indexes/categorical/test_equals.py b/pandas/tests/indexes/categorical/test_equals.py index 1ed8f3a903439..a8353f301a3c3 100644 --- a/pandas/tests/indexes/categorical/test_equals.py +++ b/pandas/tests/indexes/categorical/test_equals.py @@ -88,3 +88,9 @@ def test_equals_multiindex(self): ci = mi.to_flat_index().astype("category") assert not ci.equals(mi) + + def test_equals_string_dtype(self, any_string_dtype): + # GH#55364 + idx = CategoricalIndex(list("abc"), name="B") + other = Index(["a", "b", "c"], name="B", dtype=any_string_dtype) + assert idx.equals(other)
Backport PR #55364: BUG: eq not implemented for categorical and arrow backed strings
https://api.github.com/repos/pandas-dev/pandas/pulls/55381
2023-10-03T20:02:39Z
2023-10-03T21:51:31Z
2023-10-03T21:51:31Z
2023-10-03T21:51:31Z
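A hedged reproduction of the comparison the fix above enables; "string[pyarrow]" stands in for the ``any_string_dtype`` fixture values used in the test (requires pyarrow):

```python
import pandas as pd

ci = pd.CategoricalIndex(["a", "b", "c"], name="B")
other = pd.Index(["a", "b", "c"], name="B", dtype="string[pyarrow]")

# GH#55364: _cmp_method previously rejected categorical operands, so this
# comparison raised; the patch routes CategoricalDtype through _box_pa.
print(ci.equals(other))  # True
```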
CI: Exclude benchmarks directory when publishing docs
diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml index deaf2be0a0423..af452363666b5 100644 --- a/.github/workflows/docbuild-and-upload.yml +++ b/.github/workflows/docbuild-and-upload.yml @@ -67,7 +67,7 @@ jobs: run: cp doc/cheatsheet/Pandas_Cheat_Sheet* web/build/ - name: Upload web - run: rsync -az --delete --exclude='pandas-docs' --exclude='docs' web/build/ web@${{ secrets.server_ip }}:/var/www/html + run: rsync -az --delete --exclude='pandas-docs' --exclude='docs' --exclude='benchmarks' web/build/ web@${{ secrets.server_ip }}:/var/www/html if: github.event_name == 'push' && github.ref == 'refs/heads/main' - name: Upload dev docs
- xref #55007 We're setting up the new benchmarks server to publish the asv results website to https://pandas.pydata.org/benchmarks. We need the exclude in this PR so updating the website doesn't delete the benchmarks directory.
https://api.github.com/repos/pandas-dev/pandas/pulls/55380
2023-10-03T19:07:16Z
2023-10-07T08:20:47Z
2023-10-07T08:20:47Z
2023-10-07T08:20:47Z
Backport PR #55138: BUG: Silence `Period[B]` warnings in plotting code
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index ff2df2d4256b6..60b628c5d658c 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -24,7 +24,7 @@ Bug fixes - Fixed bug in :meth:`Categorical.equals` if other has arrow backed string dtype (:issue:`55364`) - Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) - Fixed bug in :meth:`Index.insert` raising when inserting ``None`` into :class:`Index` with ``dtype="string[pyarrow_numpy]"`` (:issue:`55365`) -- +- Silence ``Period[B]`` warnings introduced by :issue:`53446` during normal plotting activity (:issue:`55138`) .. --------------------------------------------------------------------------- .. _whatsnew_212.other: diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index cd7823ba15e44..be0ded0ecdf57 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -14,6 +14,7 @@ Final, cast, ) +import warnings import matplotlib.dates as mdates from matplotlib.ticker import ( @@ -243,18 +244,29 @@ def _convert_1d(values, units, axis): if not hasattr(axis, "freq"): raise TypeError("Axis must have `freq` set to convert to Periods") valid_types = (str, datetime, Period, pydt.date, pydt.time, np.datetime64) - if isinstance(values, valid_types) or is_integer(values) or is_float(values): - return get_datevalue(values, axis.freq) - elif isinstance(values, PeriodIndex): - return values.asfreq(axis.freq).asi8 - elif isinstance(values, Index): - return values.map(lambda x: get_datevalue(x, axis.freq)) - elif lib.infer_dtype(values, skipna=False) == "period": - # https://github.com/pandas-dev/pandas/issues/24304 - # convert ndarray[period] -> PeriodIndex - return PeriodIndex(values, freq=axis.freq).asi8 - elif isinstance(values, (list, tuple, np.ndarray, Index)): - return [get_datevalue(x, axis.freq) for x in values] + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Period with BDay freq is deprecated", category=FutureWarning ) + warnings.filterwarnings( + "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning + ) + if ( + isinstance(values, valid_types) + or is_integer(values) + or is_float(values) + ): + return get_datevalue(values, axis.freq) + elif isinstance(values, PeriodIndex): + return values.asfreq(axis.freq).asi8 + elif isinstance(values, Index): + return values.map(lambda x: get_datevalue(x, axis.freq)) + elif lib.infer_dtype(values, skipna=False) == "period": + # https://github.com/pandas-dev/pandas/issues/24304 + # convert ndarray[period] -> PeriodIndex + return PeriodIndex(values, freq=axis.freq).asi8 + elif isinstance(values, (list, tuple, np.ndarray, Index)): + return [get_datevalue(x, axis.freq) for x in values] return values @@ -567,14 +579,30 @@ def _daily_finder(vmin, vmax, freq: BaseOffset): # save this for later usage vmin_orig = vmin - (vmin, vmax) = ( - Period(ordinal=int(vmin), freq=freq), - Period(ordinal=int(vmax), freq=freq), - ) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Period with BDay freq is deprecated", category=FutureWarning + ) + warnings.filterwarnings( + "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning + ) + (vmin, vmax) = ( + Period(ordinal=int(vmin), freq=freq), + Period(ordinal=int(vmax), freq=freq), + ) assert isinstance(vmin, Period) assert isinstance(vmax, Period) span = vmax.ordinal - vmin.ordinal + 1 - dates_ = period_range(start=vmin, end=vmax, freq=freq) + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Period with BDay freq is deprecated", category=FutureWarning + ) + warnings.filterwarnings( + "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning + ) + dates_ = period_range(start=vmin, end=vmax, freq=freq) + # Initialize the output info = np.zeros( span, dtype=[("val", np.int64), ("maj", bool), ("min", bool), ("fmt", "|S20")] @@ -1072,7 +1100,13 @@ def __call__(self, x, pos: int = 0) -> str: fmt = self.formatdict.pop(x, "") if isinstance(fmt, np.bytes_): fmt = fmt.decode("utf-8") - period = Period(ordinal=int(x), freq=self.freq) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Period with BDay freq is deprecated", + category=FutureWarning, + ) + period = Period(ordinal=int(x), freq=self.freq) assert isinstance(period, Period) return period.strftime(fmt) diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index 11008646f0ad4..b97f1d64d57fd 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -2487,6 +2487,15 @@ def test_secondary_y(self, secondary_y): assert ax.get_ylim() == (0, 100) assert ax.get_yticks()[0] == 99 + @pytest.mark.slow + def test_plot_no_warning(self): + # GH 55138 + # TODO(3.0): this can be removed once Period[B] deprecation is enforced + df = tm.makeTimeDataFrame() + with tm.assert_produces_warning(False): + _ = df.plot() + _ = df.T.plot() + def _generate_4_axes_via_gridspec(): import matplotlib.pyplot as plt diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 6f0afab53c267..768fce023e6e0 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -973,3 +973,10 @@ def test_series_none_color(self): ax = series.plot(color=None) expected = _unpack_cycler(mpl.pyplot.rcParams)[:1] _check_colors(ax.get_lines(), linecolors=expected) + + @pytest.mark.slow + def test_plot_no_warning(self, ts): + # GH 55138 + # TODO(3.0): this can be removed once Period[B] deprecation is enforced + with tm.assert_produces_warning(False): + _ = ts.plot()
null
https://api.github.com/repos/pandas-dev/pandas/pulls/55378
2023-10-03T18:25:15Z
2023-10-04T00:26:36Z
2023-10-04T00:26:36Z
2023-10-04T00:26:39Z
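Every hunk in the backport above applies the same suppression pattern; here is a standalone sketch of it (the helper name `_period_quietly` is hypothetical, not part of pandas):

```python
import warnings

import pandas as pd

def _period_quietly(ordinal: int, freq) -> pd.Period:
    # Hypothetical helper mirroring the converter hunks above: silence the
    # two Period[B] FutureWarnings (GH#53446) while constructing a Period.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore", "Period with BDay freq is deprecated", category=FutureWarning
        )
        warnings.filterwarnings(
            "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning
        )
        return pd.Period(ordinal=ordinal, freq=freq)

print(_period_quietly(19000, "B"))
```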
Backport PR #55368 on branch 2.1.x (BUG: idxmin/max raising for arrow dtypes)
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 158cb51f05316..090df1489e493 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -21,7 +21,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- +- Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 49305128267be..190fd8fd54e02 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -30,6 +30,7 @@ from pandas.util._decorators import doc from pandas.util._validators import validate_fillna_kwargs +from pandas.core.dtypes.cast import infer_dtype_from_scalar from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, @@ -1595,13 +1596,21 @@ def _reduce( pa_result = self._reduce_pyarrow(name, skipna=skipna, **kwargs) if keepdims: - result = pa.array([pa_result.as_py()], type=pa_result.type) + if isinstance(pa_result, pa.Scalar): + result = pa.array([pa_result.as_py()], type=pa_result.type) + else: + result = pa.array( + [pa_result], + type=to_pyarrow_type(infer_dtype_from_scalar(pa_result)[0]), + ) return type(self)(result) if pc.is_null(pa_result).as_py(): return self.dtype.na_value - else: + elif isinstance(pa_result, pa.Scalar): return pa_result.as_py() + else: + return pa_result def _explode(self): """ diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index e7b6a0c0b39b0..e9c69c9d2df52 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1056,6 +1056,19 @@ def test_idxmax_numeric_only(self, numeric_only): expected = Series([1, 0, 1], index=["a", "b", "c"]) tm.assert_series_equal(result, expected) + def test_idxmax_arrow_types(self): + # GH#55368 + pytest.importorskip("pyarrow") + + df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1]}, dtype="int64[pyarrow]") + result = df.idxmax() + expected = Series([1, 0], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([2, 1], index=["a", "b"]) + tm.assert_series_equal(result, expected) + def test_idxmax_axis_2(self, float_frame): frame = float_frame msg = "No axis named 2 for object type DataFrame"
#55368
https://api.github.com/repos/pandas-dev/pandas/pulls/55377
2023-10-03T17:54:34Z
2023-10-03T20:00:59Z
2023-10-03T20:00:58Z
2023-10-03T20:06:06Z
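A runnable sketch of the fixed reductions, condensed from the backported test (requires pyarrow):

```python
import pandas as pd

df = pd.DataFrame({"a": [2, 3, 1], "b": [2, 1, 1]}, dtype="int64[pyarrow]")

# GH#55368: idxmin/idxmax return a plain position rather than a pa.Scalar,
# so the old unconditional pa_result.as_py() call raised for arrow dtypes.
print(df.idxmax())  # a -> 1, b -> 0
print(df.idxmin())  # a -> 2, b -> 1
```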
Backport PR #55363 on branch 2.1.x (BUG: ndim of string block incorrect with string inference)
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 05aa9f38bb2b0..8bb6c6b5de7ea 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -383,7 +383,7 @@ def ndarray_to_mgr( new_block( dtype.construct_array_type()._from_sequence(data, dtype=dtype), BlockPlacement(slice(i, i + 1)), - ndim=1, + ndim=2, ) for i, data in enumerate(obj_columns) ] diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index a8ab3ce1ba014..e1aa682d763a9 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2743,6 +2743,13 @@ def test_frame_string_inference_array_string_dtype(self): df = DataFrame(np.array([["a", "c"], ["b", "d"]]), columns=["a", "b"]) tm.assert_frame_equal(df, expected) + def test_frame_string_inference_block_dim(self): + # GH#55363 + pytest.importorskip("pyarrow") + with pd.option_context("future.infer_string", True): + df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]])) + assert df._mgr.blocks[0].ndim == 2 + class TestDataFrameConstructorIndexInference: def test_frame_from_dict_of_series_overlapping_monthly_period_indexes(self):
Backport PR #55363: BUG: ndim of string block incorrect with string inference
https://api.github.com/repos/pandas-dev/pandas/pulls/55376
2023-10-03T16:55:20Z
2023-10-03T20:01:09Z
2023-10-03T20:01:09Z
2023-10-03T20:01:09Z
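A sketch of the regression test above; it pokes at the private block manager, so treat it as a diagnostic only (requires pyarrow):

```python
import numpy as np
import pandas as pd

with pd.option_context("future.infer_string", True):
    df = pd.DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))

# GH#55363: the inferred string blocks were built with ndim=1 inside a
# two-dimensional manager; the one-character patch above makes them 2-D.
assert df._mgr.blocks[0].ndim == 2
```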
DOC: Update Issue Triage Instructions
diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst index b49c9644e1b2a..29cc256f35a4e 100644 --- a/doc/source/development/maintaining.rst +++ b/doc/source/development/maintaining.rst @@ -44,6 +44,9 @@ reading. Issue triage ------------ +Triage is an important first step in addressing issues reported by the community, and even +partial contributions are a great way to help maintain pandas. Only remove the "Needs Triage" +tag once all of the steps below have been completed. Here's a typical workflow for triaging a newly opened issue. @@ -67,9 +70,9 @@ Here's a typical workflow for triaging a newly opened issue. 3. **Is this a duplicate issue?** We have many open issues. If a new issue is clearly a duplicate, label the - new issue as "Duplicate" assign the milestone "No Action", and close the issue - with a link to the original issue. Make sure to still thank the reporter, and - encourage them to chime in on the original issue, and perhaps try to fix it. + new issue as "Duplicate" and close the issue with a link to the original issue. + Make sure to still thank the reporter, and encourage them to chime in on the + original issue, and perhaps try to fix it. If the new issue provides relevant information, such as a better or slightly different example, add it to the original issue as a comment or an edit to @@ -90,6 +93,10 @@ Here's a typical workflow for triaging a newly opened issue. If a reproducible example is provided, but you see a simplification, edit the original post with your simpler reproducible example. + Ensure the issue exists on the main branch and that it has the "Needs Triage" tag + until all steps have been completed. Add a comment to the issue once you have + verified it exists on the main branch, so others know it has been confirmed. + 5. **Is this a clearly defined feature request?** Generally, pandas prefers to discuss and design new features in issues, before @@ -97,8 +104,9 @@ Here's a typical workflow for triaging a newly opened issue. for the new feature. Having them write a full docstring is a good way to pin down specifics. - We'll need a discussion from several pandas maintainers before deciding whether - the proposal is in scope for pandas. + Tag new feature requests with "Needs Discussion", as we'll need a discussion + from several pandas maintainers before deciding whether the proposal is in + scope for pandas. 6. **Is this a usage question?** @@ -117,10 +125,6 @@ Here's a typical workflow for triaging a newly opened issue. If the issue is clearly defined and the fix seems relatively straightforward, label the issue as "Good first issue". - Typically, new issues will be assigned the "Contributions welcome" milestone, - unless it's know that this issue should be addressed in a specific release (say - because it's a large regression). - Once you have completed the above, make sure to remove the "needs triage" label. .. _maintaining.regressions:
The following changes were applied to `doc/source/development/maintaining.rst` - Encouraged partial contributions to triaging process at the start of the section. - Removed the 'No Action' milestone. - Specified when to remove the 'Needs Triage' tag. - Explained the need for 'Needs Discussion' tag on feature requests. - Removed Contributions Welcome milestone. - Closes #55311
https://api.github.com/repos/pandas-dev/pandas/pulls/55373
2023-10-03T15:45:16Z
2023-10-07T14:22:52Z
2023-10-07T14:22:52Z
2023-10-07T15:21:07Z
BUG: DataFrame.join reordering index levels when joining on subset of levels
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index fa3cef6d9457d..620dd8c3c0c37 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -133,10 +133,36 @@ and ``sort=False``: result -.. _whatsnew_220.notable_bug_fixes.notable_bug_fix2: +.. _whatsnew_220.notable_bug_fixes.multiindex_join_different_levels: -notable_bug_fix2 -^^^^^^^^^^^^^^^^ +:func:`merge` and :meth:`DataFrame.join` no longer reorder levels when levels differ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In previous versions of pandas, :func:`merge` and :meth:`DataFrame.join` would reorder +index levels when joining on two indexes with different levels (:issue:`34133`). + +.. ipython:: python + + left = pd.DataFrame({"left": 1}, index=pd.MultiIndex.from_tuples([("x", 1), ("x", 2)], names=["A", "B"])) + right = pd.DataFrame({"right": 2}, index=pd.MultiIndex.from_tuples([(1, 1), (2, 2)], names=["B", "C"])) + result = left.join(right) + +*Old Behavior* + +.. code-block:: ipython + + In [5]: result + Out[5]: + left right + B A C + 1 x 1 1 2 + 2 x 2 1 2 + +*New Behavior* + +.. ipython:: python + + result .. --------------------------------------------------------------------------- .. _whatsnew_220.api_breaking: @@ -341,6 +367,7 @@ Reshaping ^^^^^^^^^ - Bug in :func:`concat` ignoring ``sort`` parameter when passed :class:`DatetimeIndex` indexes (:issue:`54769`) - Bug in :func:`merge` returning columns in incorrect order when left and/or right is empty (:issue:`51929`) +- Sparse ^^^^^^ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 9017ff121976b..11f2cc8ebf1ff 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4725,6 +4725,13 @@ def _join_multi(self, other: Index, how: JoinHow): multi_join_idx = multi_join_idx.remove_unused_levels() + # maintain the order of the index levels + if how == "right": + level_order = other_names_list + ldrop_names + else: + level_order = self_names_list + rdrop_names + multi_join_idx = multi_join_idx.reorder_levels(level_order) + return multi_join_idx, lidx, ridx jl = next(iter(overlap)) diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 20daa388c2c88..c630ba6a43cb1 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -902,7 +902,7 @@ def test_join_inner_multiindex_deterministic_order(): result = left.join(right, how="inner") expected = DataFrame( {"e": [5], "f": [6]}, - index=MultiIndex.from_tuples([(2, 1, 4, 3)], names=("b", "a", "d", "c")), + index=MultiIndex.from_tuples([(1, 2, 4, 3)], names=("a", "b", "d", "c")), ) tm.assert_frame_equal(result, expected) @@ -926,10 +926,16 @@ def test_join_multiindex_one_level(join_type): ) right = DataFrame(data={"d": 4}, index=MultiIndex.from_tuples([(2,)], names=("b",))) result = left.join(right, how=join_type) - expected = DataFrame( - {"c": [3], "d": [4]}, - index=MultiIndex.from_tuples([(2, 1)], names=["b", "a"]), - ) + if join_type == "right": + expected = DataFrame( + {"c": [3], "d": [4]}, + index=MultiIndex.from_tuples([(2, 1)], names=["b", "a"]), + ) + else: + expected = DataFrame( + {"c": [3], "d": [4]}, + index=MultiIndex.from_tuples([(1, 2)], names=["a", "b"]), + ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py index ab010bdb909f1..c029acf0c8938 100644 --- a/pandas/tests/reshape/merge/test_multi.py +++ b/pandas/tests/reshape/merge/test_multi.py @@ -69,11 +69,6 @@ def on_cols_multi(): return ["Origin", "Destination", "Period"] -@pytest.fixture -def idx_cols_multi(): - return ["Origin", "Destination", "Period", "TripPurp", "LinkType"] - - class TestMergeMulti: def test_merge_on_multikey(self, left, right, join_type): on_cols = ["key1", "key2"] @@ -815,9 +810,13 @@ def test_join_multi_levels2(self): class TestJoinMultiMulti: - def test_join_multi_multi( - self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi - ): + def test_join_multi_multi(self, left_multi, right_multi, join_type, on_cols_multi): + left_names = left_multi.index.names + right_names = right_multi.index.names + if join_type == "right": + level_order = right_names + left_names.difference(right_names) + else: + level_order = left_names + right_names.difference(left_names) # Multi-index join tests expected = ( merge( @@ -826,7 +825,7 @@ def test_join_multi_multi( how=join_type, on=on_cols_multi, ) - .set_index(idx_cols_multi) + .set_index(level_order) .sort_index() ) @@ -834,11 +833,18 @@ def test_join_multi_multi( tm.assert_frame_equal(result, expected) def test_join_multi_empty_frames( - self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi + self, left_multi, right_multi, join_type, on_cols_multi ): left_multi = left_multi.drop(columns=left_multi.columns) right_multi = right_multi.drop(columns=right_multi.columns) + left_names = left_multi.index.names + right_names = right_multi.index.names + if join_type == "right": + level_order = right_names + left_names.difference(right_names) + else: + level_order = left_names + right_names.difference(left_names) + expected = ( merge( left_multi.reset_index(), @@ -846,7 +852,7 @@ def test_join_multi_empty_frames( how=join_type, on=on_cols_multi, ) - .set_index(idx_cols_multi) + .set_index(level_order) .sort_index() ) diff --git a/pandas/tests/series/methods/test_align.py b/pandas/tests/series/methods/test_align.py index e1b3dd4888ef5..d36fd5335bdfc 100644 --- a/pandas/tests/series/methods/test_align.py +++ b/pandas/tests/series/methods/test_align.py @@ -240,10 +240,10 @@ def test_align_left_different_named_levels(): result_left, result_right = left.align(right) expected_left = Series( - [2], index=pd.MultiIndex.from_tuples([(1, 3, 4, 2)], names=["a", "c", "d", "b"]) + [2], index=pd.MultiIndex.from_tuples([(1, 4, 3, 2)], names=["a", "d", "c", "b"]) ) expected_right = Series( - [1], index=pd.MultiIndex.from_tuples([(1, 3, 4, 2)], names=["a", "c", "d", "b"]) + [1], index=pd.MultiIndex.from_tuples([(1, 4, 3, 2)], names=["a", "d", "c", "b"]) ) tm.assert_series_equal(result_left, expected_left) tm.assert_series_equal(result_right, expected_right) diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 8547fd6988791..d40ff6c139653 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -941,8 +941,8 @@ def test_series_varied_multiindex_alignment(): expected = Series( [1000, 2001, 3002, 4003], index=pd.MultiIndex.from_tuples( - [("x", 1, "a"), ("x", 2, "a"), ("y", 1, "a"), ("y", 2, "a")], - names=["xy", "num", "ab"], + [("a", "x", 1), ("a", "x", 2), ("a", "y", 1), ("a", "y", 2)], + names=["ab", "xy", "num"], ), ) tm.assert_series_equal(result, expected)
- [x] closes #34133 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v2.2.0.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55370
2023-10-03T11:36:50Z
2023-10-04T00:29:44Z
2023-10-04T00:29:44Z
2023-11-16T12:56:49Z
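The whatsnew entry in the diff above shows the behavior change; a condensed, runnable version of it:

```python
import pandas as pd

left = pd.DataFrame(
    {"left": 1},
    index=pd.MultiIndex.from_tuples([("x", 1), ("x", 2)], names=["A", "B"]),
)
right = pd.DataFrame(
    {"right": 2},
    index=pd.MultiIndex.from_tuples([(1, 1), (2, 2)], names=["B", "C"]),
)

# GH#34133: the joined index now keeps the calling frame's level order
# ("A", "B", then the right-only level "C") instead of reordering levels.
result = left.join(right)
print(list(result.index.names))  # ['A', 'B', 'C']
```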
MAINT: Remove `np.int_` and `np.uint`
diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst index bc2f4420da784..c4721f3a6b09c 100644 --- a/doc/source/user_guide/enhancingperf.rst +++ b/doc/source/user_guide/enhancingperf.rst @@ -184,7 +184,7 @@ can be improved by passing an ``np.ndarray``. ...: cpdef np.ndarray[double] apply_integrate_f(np.ndarray col_a, np.ndarray col_b, ...: np.ndarray col_N): ...: assert (col_a.dtype == np.float64 - ...: and col_b.dtype == np.float64 and col_N.dtype == np.int_) + ...: and col_b.dtype == np.float64 and col_N.dtype == np.dtype(int)) ...: cdef Py_ssize_t i, n = len(col_N) ...: assert (len(col_a) == len(col_b) == n) ...: cdef np.ndarray[double] res = np.empty(n) diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index d376fa4c1919e..1b974a92f8188 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -21,6 +21,21 @@ ) +np_long: type +np_ulong: type + +if _nlv >= Version("2.0.0.dev0"): + try: + np_long = np.long  # type: ignore[attr-defined] + np_ulong = np.ulong  # type: ignore[attr-defined] + except AttributeError: + np_long = np.int_ + np_ulong = np.uint +else: + np_long = np.int_ + np_ulong = np.uint + + __all__ = [ "np", "_np_version", diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c952178f4c998..fbff2a552bac3 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1641,7 +1641,7 @@ def safe_sort( else: mask = None else: - reverse_indexer = np.empty(len(sorter), dtype=np.int_) + reverse_indexer = np.empty(len(sorter), dtype=int) reverse_indexer.put(sorter, np.arange(len(sorter))) # Out of bound indices will be masked with `-1` next, so we # may deal with them here without performance loss using `mode='wrap'` diff --git a/pandas/tests/arrays/boolean/test_reduction.py b/pandas/tests/arrays/boolean/test_reduction.py index dd8c3eda9ed05..71156a4d84ae5 100644 --- a/pandas/tests/arrays/boolean/test_reduction.py +++ b/pandas/tests/arrays/boolean/test_reduction.py @@ -1,6 +1,8 @@ import numpy as np import pytest +from pandas.compat.numpy import np_long + import pandas as pd @@ -51,7 +53,7 @@ def test_reductions_return_types(dropna, data, all_numeric_reductions): s = s.dropna() if op in ("sum", "prod"): - assert isinstance(getattr(s, op)(), np.int_) + assert isinstance(getattr(s, op)(), np_long) elif op == "count": # Oddly on the 32 bit build (but not Windows), this is intc (!= intp) assert isinstance(getattr(s, op)(), np.integer) diff --git a/pandas/tests/dtypes/cast/test_infer_dtype.py b/pandas/tests/dtypes/cast/test_infer_dtype.py index ed08df74461ef..50eaa1f4d8713 100644 --- a/pandas/tests/dtypes/cast/test_infer_dtype.py +++ b/pandas/tests/dtypes/cast/test_infer_dtype.py @@ -170,7 +170,7 @@ def test_infer_dtype_from_scalar(value, expected): @pytest.mark.parametrize( "arr, expected", [ - ([1], np.int_), + ([1], np.dtype(int)), (np.array([1], dtype=np.int64), np.int64), ([np.nan, 1, ""], np.object_), (np.array([[1.0, 2.0]]), np.float64), diff --git a/pandas/tests/extension/base/dim2.py b/pandas/tests/extension/base/dim2.py index 3d1274df0a21b..12006248b1db3 100644 --- a/pandas/tests/extension/base/dim2.py +++ b/pandas/tests/extension/base/dim2.py @@ -248,7 +248,7 @@ def get_reduction_result_dtype(dtype): return NUMPY_INT_TO_DTYPE[np.dtype(int)] else: # i.e. dtype.kind == "u" - return NUMPY_INT_TO_DTYPE[np.dtype(np.uint)] + return NUMPY_INT_TO_DTYPE[np.dtype("uint")] if method in ["sum", "prod"]: # std and var are not dtype-preserving diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 370cbf0f33174..bc48d00c4a8a8 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -108,11 +108,11 @@ def test_setitem_list(self, float_frame): data["A"] = newcolumndata def test_setitem_list2(self): - df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_) + df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=int) df.loc[1, ["tt1", "tt2"]] = [1, 2] result = df.loc[df.index[1], ["tt1", "tt2"]] - expected = Series([1, 2], df.columns, dtype=np.int_, name=1) + expected = Series([1, 2], df.columns, dtype=int, name=1) tm.assert_series_equal(result, expected) df["tt1"] = df["tt2"] = "0" diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py index 60c05767a5e1a..6bd441400dc54 100644 --- a/pandas/tests/frame/methods/test_shift.py +++ b/pandas/tests/frame/methods/test_shift.py @@ -1,6 +1,7 @@ import numpy as np import pytest +from pandas.compat.numpy import np_long import pandas.util._test_decorators as td import pandas as pd @@ -471,22 +472,22 @@ def test_shift_axis1_multiple_blocks_with_int_fill(self): df1 = DataFrame(rng.integers(1000, size=(5, 3), dtype=int)) df2 = DataFrame(rng.integers(1000, size=(5, 2), dtype=int)) df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1) - result = df3.shift(2, axis=1, fill_value=np.int_(0)) + result = df3.shift(2, axis=1, fill_value=np_long(0)) assert len(df3._mgr.blocks) == 2 expected = df3.take([-1, -1, 0, 1], axis=1) - expected.iloc[:, :2] = np.int_(0) + expected.iloc[:, :2] = np_long(0) expected.columns = df3.columns tm.assert_frame_equal(result, expected) # Case with periods < 0 df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1) - result = df3.shift(-2, axis=1, fill_value=np.int_(0)) + result = df3.shift(-2, axis=1, fill_value=np_long(0)) assert len(df3._mgr.blocks) == 2 expected = df3.take([2, 3, -1, -1], axis=1) - expected.iloc[:, -2:] = np.int_(0) + expected.iloc[:, -2:] = np_long(0) expected.columns = df3.columns tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index fd851ab244cb8..e2dfa98c2365a 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1823,7 +1823,7 @@ def test_constructor_single_value(self): DataFrame("a", [1, 2], ["a", "c"], float) def test_constructor_with_datetimes(self): - intname = np.dtype(np.int_).name + intname = np.dtype(int).name floatname = np.dtype(np.float64).name objectname = np.dtype(np.object_).name diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index e66557f132c1d..84777a58a839d 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -10,6 +10,10 @@ IS64, is_platform_windows, ) +from pandas.compat.numpy import ( + np_long, + np_ulong, +) import pandas.util._test_decorators as td import pandas as pd @@ -1700,11 +1704,11 @@ class TestEmptyDataFrameReductions: "opname, dtype, exp_value, exp_dtype", [ ("sum", np.int8, 0, np.int64), - ("prod", np.int8, 1, np.int_), + ("prod", np.int8, 1, np_long), ("sum", np.int64, 0, np.int64), ("prod", np.int64, 1, np.int64), ("sum", np.uint8, 0, np.uint64), - ("prod", np.uint8, 1, np.uint), + ("prod", np.uint8, 1, np_ulong), ("sum", np.uint64, 0, np.uint64), ("prod", np.uint64, 1, np.uint64), ("sum", np.float32, 0, np.float32), diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index 865fda0ab54a2..5c99882cef6d2 100644 --- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -229,7 +229,7 @@ def test_cython_agg_empty_buckets_nanops(observed): # GH-18869 can't call nanops on empty groups, so hardcode expected # for these df = DataFrame([11, 12, 13], columns=["a"]) - grps = np.arange(0, 25, 5, dtype=np.int_) + grps = np.arange(0, 25, 5, dtype=int) # add / sum result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general( "sum", alt=None, numeric_only=True diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 41bbfcf6840a9..a92880c87b847 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -824,7 +824,7 @@ def test_nlargest_and_smallest_noop(data, groups, dtype, method): data = list(reversed(data)) ser = Series(data, name="a") result = getattr(ser.groupby(groups), method)(n=2) - expidx = np.array(groups, dtype=np.int_) if isinstance(groups, list) else groups + expidx = np.array(groups, dtype=int) if isinstance(groups, list) else groups expected = Series(data, index=MultiIndex.from_arrays([expidx, ser.index]), name="a") tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 9132be50d5857..4ca8b0e317bd2 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -3004,12 +3004,12 @@ def test_groupby_reduce_period(): res = gb.max() expected = ser[-10:] - expected.index = Index(range(10), dtype=np.int_) + expected.index = Index(range(10), dtype=int) tm.assert_series_equal(res, expected) res = gb.min() expected = ser[:10] - expected.index = Index(range(10), dtype=np.int_) + expected.index = Index(range(10), dtype=int) tm.assert_series_equal(res, expected) diff --git a/pandas/tests/groupby/test_min_max.py b/pandas/tests/groupby/test_min_max.py index 37eb52be0b37b..30c7e1df1e691 100644 --- a/pandas/tests/groupby/test_min_max.py +++ b/pandas/tests/groupby/test_min_max.py @@ -108,7 +108,7 @@ def test_max_inat_not_all_na(): # Note: in converting to float64, the iNaT + 1 maps to iNaT, i.e. is lossy expected = Series({1: np.nan, 2: np.nan, 3: iNaT + 1}) - expected.index = expected.index.astype(np.int_) + expected.index = expected.index.astype(int) tm.assert_series_equal(result, expected, check_exact=True) diff --git a/pandas/tests/groupby/test_quantile.py b/pandas/tests/groupby/test_quantile.py index efe7b171d630d..805fef2125fda 100644 --- a/pandas/tests/groupby/test_quantile.py +++ b/pandas/tests/groupby/test_quantile.py @@ -468,7 +468,7 @@ def test_groupby_quantile_dt64tz_period(): # Check that we match the group-by-group result exp = {i: df.iloc[i::5].quantile(0.5) for i in range(5)} expected = DataFrame(exp).T.infer_objects() - expected.index = expected.index.astype(np.int_) + expected.index = expected.index.astype(int) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_size.py b/pandas/tests/groupby/test_size.py index 76b26c04f9f3a..c275db9d1788c 100644 --- a/pandas/tests/groupby/test_size.py +++ b/pandas/tests/groupby/test_size.py @@ -39,7 +39,7 @@ def test_size_axis_1(df, axis_1, by, sort, dropna): if sort: expected = expected.sort_index() if is_integer_dtype(expected.index.dtype) and not any(x is None for x in by): - expected.index = expected.index.astype(np.int_) + expected.index = expected.index.astype(int) msg = "DataFrame.groupby with axis=1 is deprecated" with tm.assert_produces_warning(FutureWarning, match=msg): diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py index 944dda8977882..45a33d3b70f71 100644 --- a/pandas/tests/groupby/test_value_counts.py +++ b/pandas/tests/groupby/test_value_counts.py @@ -996,7 +996,7 @@ def test_mixed_groupings(normalize, expected_label, expected_values): result = gp.value_counts(sort=True, normalize=normalize) expected = DataFrame( { - "level_0": np.array([4, 4, 5], dtype=np.int_), + "level_0": np.array([4, 4, 5], dtype=int), "A": [1, 1, 2], "level_2": [8, 8, 7], "B": [1, 3, 2], diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index f44cbbf560584..d08757a206e67 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -4,6 +4,8 @@ import numpy as np import pytest +from pandas.compat.numpy import np_long + import pandas as pd from pandas import ( DataFrame, @@ -54,7 +56,7 @@ def test_time_overflow_for_32bit_machines(self): # (which has value 1e9) and since the max value for np.int32 is ~2e9, # and since those machines won't promote np.int32 to np.int64, we get # overflow. - periods = np.int_(1000) + periods = np_long(1000) idx1 = date_range(start="2000", periods=periods, freq="s") assert len(idx1) == periods diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index c3944a4443d67..d877110e72b26 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -8,6 +8,8 @@ import numpy as np import pytest +from pandas.compat.numpy import np_long + import pandas as pd from pandas import ( DatetimeIndex, @@ -91,7 +93,7 @@ def test_dti_business_getitem(self, freq): assert fancy_indexed.freq is None # 32-bit vs. 64-bit platforms - assert rng[4] == rng[np.int_(4)] + assert rng[4] == rng[np_long(4)] @pytest.mark.parametrize("freq", ["B", "C"]) def test_dti_business_getitem_matplotlib_hackaround(self, freq): diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index bc04c1c6612f4..8fd1e296fb79a 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -457,7 +457,7 @@ def test_fancy(self, simple_index): ["string", "int64", "int32", "uint64", "uint32", "float64", "float32"], indirect=True, ) - @pytest.mark.parametrize("dtype", [np.int_, np.bool_]) + @pytest.mark.parametrize("dtype", [int, np.bool_]) def test_empty_fancy(self, index, dtype): empty_arr = np.array([], dtype=dtype) empty_index = type(index)([], dtype=index.dtype) diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py index de989ad550f2b..081da385ebcc3 100644 --- a/pandas/tests/indexing/multiindex/test_partial.py +++ b/pandas/tests/indexing/multiindex/test_partial.py @@ -166,7 +166,7 @@ def test_getitem_intkey_leading_level( mi = ser.index assert isinstance(mi, MultiIndex) if dtype is int: - assert mi.levels[0].dtype == np.int_ + assert mi.levels[0].dtype == np.dtype(int) else: assert mi.levels[0].dtype == np.float64 diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index a2693c85e507f..20568342222c3 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -442,7 +442,7 @@ def test_loc_to_fail(self): ) msg = ( - rf"\"None of \[Index\(\[1, 2\], dtype='{np.int_().dtype}'\)\] are " + rf"\"None of \[Index\(\[1, 2\], dtype='{np.dtype(int)}'\)\] are " r"in the \[index\]\"" ) with pytest.raises(KeyError, match=msg): @@ -460,7 +460,7 @@ def test_loc_to_fail2(self): s.loc[-1] msg = ( - rf"\"None of \[Index\(\[-1, -2\], dtype='{np.int_().dtype}'\)\] are " + rf"\"None of \[Index\(\[-1, -2\], dtype='{np.dtype(int)}'\)\] are " r"in the \[index\]\"" ) with pytest.raises(KeyError, match=msg): @@ -476,7 +476,7 @@ def test_loc_to_fail2(self): s["a"] = 2 msg = ( - rf"\"None of \[Index\(\[-2\], dtype='{np.int_().dtype}'\)\] are " + rf"\"None of \[Index\(\[-2\], dtype='{np.dtype(int)}'\)\] are " r"in the \[index\]\"" ) with pytest.raises(KeyError, match=msg): @@ -493,7 +493,7 @@ def test_loc_to_fail3(self): df = DataFrame([["a"], ["b"]], index=[1, 2], columns=["value"]) msg = ( - rf"\"None of \[Index\(\[3\], dtype='{np.int_().dtype}'\)\] are " + rf"\"None of \[Index\(\[3\], dtype='{np.dtype(int)}'\)\] are " r"in the \[index\]\"" ) with pytest.raises(KeyError, match=msg): @@ -510,7 +510,7 @@ def test_loc_getitem_list_with_fail(self): s.loc[[2]] - msg = f"\"None of [Index([3], dtype='{np.int_().dtype}')] are in the [index]" + msg = f"\"None of [Index([3], dtype='{np.dtype(int)}')] are in the [index]" with pytest.raises(KeyError, match=re.escape(msg)): s.loc[[3]] @@ -1209,7 +1209,7 @@ def test_loc_setitem_empty_append_raises(self): df = DataFrame(columns=["x", "y"]) df.index = df.index.astype(np.int64) msg = ( - rf"None of \[Index\(\[0, 1\], dtype='{np.int_().dtype}'\)\] " + rf"None of \[Index\(\[0, 1\], dtype='{np.dtype(int)}'\)\] " r"are in the \[index\]" ) with pytest.raises(KeyError, match=msg): diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index 8e5cde42ec91b..8f499644f1013 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -402,7 +402,7 @@ def test_series_partial_set(self): # raises as nothing is in the index msg = ( - rf"\"None of \[Index\(\[3, 3, 3\], dtype='{np.int_().dtype}'\)\] " + rf"\"None of \[Index\(\[3, 3, 3\], dtype='{np.dtype(int)}'\)\] " r"are in the \[index\]\"" ) with pytest.raises(KeyError, match=msg): @@ -483,7 +483,7 @@ def test_series_partial_set_with_name(self): # raises as nothing is in the index msg = ( - rf"\"None of \[Index\(\[3, 3, 3\], dtype='{np.int_().dtype}', " + rf"\"None of \[Index\(\[3, 3, 3\], dtype='{np.dtype(int)}', " r"name='idx'\)\] are in the \[index\]\"" ) with pytest.raises(KeyError, match=msg): diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 597bc2975268e..09567e7012313 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -468,7 +468,7 @@ def test_set_change_dtype(self, mgr): np.random.default_rng(2).standard_normal(N).astype(int), ) idx = mgr2.items.get_loc("quux") - assert mgr2.iget(idx).dtype == np.int_ + assert mgr2.iget(idx).dtype == np.dtype(int) mgr2.iset( mgr2.items.get_loc("quux"), np.random.default_rng(2).standard_normal(N) diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index c79fdd9145a6a..9f7840588f89e 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -47,7 +47,7 @@ def test_read_csv_with_custom_date_parser(all_parsers): # GH36111 def __custom_date_parser(time): time = time.astype(np.float64) - time = time.astype(np.int_) # convert float seconds to int type + time = time.astype(int) # convert float seconds to int type return pd.to_timedelta(time, unit="s") testdata = StringIO( @@ -87,7 +87,7 @@ def test_read_csv_with_custom_date_parser_parse_dates_false(all_parsers): # GH44366 def __custom_date_parser(time): time = time.astype(np.float64) - time = time.astype(np.int_) # convert float seconds to int type + time = time.astype(int) # convert float seconds to int type return pd.to_timedelta(time, unit="s") testdata = StringIO( diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index e77dc0b305171..89afe2306afbc 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -6,7 +6,10 @@ import pytest from pandas.compat import is_platform_linux -from pandas.compat.numpy import np_version_gte1p24 +from pandas.compat.numpy import ( + np_long, + np_version_gte1p24, +) import pandas.util._test_decorators as td import pandas as pd @@ -561,7 +564,7 @@ def test_plot_fails_with_dupe_color_and_style(self): [ ["scott", 20], [None, 20], - [None, np.int_(20)], + [None, np_long(20)], [0.5, np.linspace(-100, 100, 20)], ], ) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index d889ae2e4806b..d203a04a7fffc 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -365,7 +365,7 @@ def test_merge_join_key_dtype_cast(self): lkey = np.array([1]) rkey = np.array([2]) df = merge(df1, df2, left_on=lkey, right_on=rkey, how="outer") - assert df["key_0"].dtype == np.int_ + assert df["key_0"].dtype == np.dtype(int) def test_handle_join_key_pass_array(self): left = DataFrame( @@ -389,7 +389,7 @@ def test_handle_join_key_pass_array(self): rkey = np.array([1, 1, 2, 3, 4, 5]) merged = merge(left, right, left_on=lkey, right_on=rkey, how="outer") - expected = Series([1, 1, 1, 1, 2, 2, 3, 4, 5], dtype=np.int_, name="key_0") + expected = Series([1, 1, 1, 1, 2, 2, 3, 4, 5], dtype=int, name="key_0") tm.assert_series_equal(merged["key_0"], expected) left = DataFrame({"value": np.arange(3)}) diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py index 287b7557f50f9..44ce5c79db348 100644 --- a/pandas/tests/scalar/test_na_scalar.py +++ b/pandas/tests/scalar/test_na_scalar.py @@ -9,6 +9,7 @@ import pytest from pandas._libs.missing import NA +from pandas.compat.numpy import np_long from pandas.core.dtypes.common import is_scalar @@ -102,9 +103,9 @@ def test_comparison_ops(comparison_op, other): -0.0, False, np.bool_(False), - np.int_(0), + np_long(0), np.float64(0), - np.int_(-0), + np_long(-0), np.float64(-0), ], ) @@ -123,7 +124,7 @@ def test_pow_special(value, asarray): @pytest.mark.parametrize( - "value", [1, 1.0, True, np.bool_(True), np.int_(1), np.float64(1)] + "value", [1, 1.0, True, np.bool_(True), np_long(1), np.float64(1)] ) @pytest.mark.parametrize("asarray", [True, False]) def test_rpow_special(value, asarray): @@ -133,14 +134,14 @@ def test_rpow_special(value, asarray): if asarray: result = result[0] - elif not isinstance(value, (np.float64, np.bool_, np.int_)): + elif not isinstance(value, (np.float64, np.bool_, np_long)): # this assertion isn't possible with asarray=True assert isinstance(result, type(value)) assert result == value -@pytest.mark.parametrize("value", [-1, -1.0, np.int_(-1), np.float64(-1)]) +@pytest.mark.parametrize("value", [-1, -1.0, np_long(-1), np.float64(-1)]) @pytest.mark.parametrize("asarray", [True, False]) def test_rpow_minus_one(value, asarray): if asarray: diff --git a/pandas/tests/series/methods/test_reindex.py b/pandas/tests/series/methods/test_reindex.py index f3075c116883a..9d6611cd53068 100644 --- a/pandas/tests/series/methods/test_reindex.py +++ b/pandas/tests/series/methods/test_reindex.py @@ -197,7 +197,7 @@ def test_reindex_int(datetime_series): # NO NaNs introduced reindexed_int = int_ts.reindex(int_ts.index[::2]) - assert reindexed_int.dtype == np.int_ + assert reindexed_int.dtype == np.dtype(int) def test_reindex_bool(datetime_series): diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index f294885fb8f4d..be68918d2a380 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -348,7 +348,7 @@ def test_categorical_series_repr(self): 8 8 9 9 dtype: category -Categories (10, {np.int_().dtype}): [0, 1, 2, 3, ..., 6, 7, 8, 9]""" +Categories (10, {np.dtype(int)}): [0, 1, 2, 3, ..., 6, 7, 8, 9]""" assert repr(s) == exp @@ -374,7 +374,7 @@ def test_categorical_series_repr_ordered(self): 8 8 9 9 dtype: category -Categories (10, {np.int_().dtype}): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]""" +Categories (10, {np.dtype(int)}): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]""" assert repr(s) == exp diff --git a/pyproject.toml b/pyproject.toml index 89432c2353ea8..a8388a9ff52de 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -505,6 +505,8 @@ filterwarnings = [ "ignore:distutils Version classes are deprecated:DeprecationWarning:numexpr", "ignore:distutils Version classes are deprecated:DeprecationWarning:fastparquet", "ignore:distutils Version classes are deprecated:DeprecationWarning:fsspec", + # Can be removed once https://github.com/numpy/numpy/pull/24794 is merged + "ignore:.*In the future `np.long` will be defined as.*:FutureWarning", ] junit_family = "xunit2" markers = [
Hi! This PR addresses changes that will be shipped in https://github.com/numpy/numpy/pull/24794 - deprecation of `np.int_` and `np.uint`. An explanation of changes: - In places where `np.int_(value)` was used I replaced it with backward compatible `np_long(value)`. - If dtypes were directly compared I replaced `np.int_` with `np.dtype(int)` (they are equivalent). - For `dtype=` arguments I used `int` for conciseness.
https://api.github.com/repos/pandas-dev/pandas/pulls/55369
2023-10-03T10:33:58Z
2023-10-03T22:42:23Z
2023-10-03T22:42:23Z
2023-10-13T10:58:04Z
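The heart of this PR is the small shim added to pandas/compat/numpy; a standalone equivalent (using numpy's own NumpyVersion in place of pandas' vendored Version class) might look like this:

```python
import numpy as np
from numpy.lib import NumpyVersion

np_long: type
np_ulong: type

# On NumPy >= 2.0 prefer the reintroduced np.long / np.ulong; on older
# releases (or 2.0 dev builds that predate them) fall back to the classic
# np.int_ / np.uint aliases that this PR removes from call sites.
if NumpyVersion(np.__version__) >= "2.0.0.dev0":
    try:
        np_long = np.long  # type: ignore[attr-defined]
        np_ulong = np.ulong  # type: ignore[attr-defined]
    except AttributeError:
        np_long = np.int_
        np_ulong = np.uint
else:
    np_long = np.int_
    np_ulong = np.uint

print(np_long, np_ulong)
```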
BUG: idxmin/max raising for arrow dtypes
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 8aba2bcbecf2f..bd1b301ccb119 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -22,6 +22,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ +- Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) - Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`) - Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`) - Silence ``Period[B]`` warnings introduced by :issue:`53446` during normal plotting activity (:issue:`55138`) diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 0579aa3760531..2f1f80079e925 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -30,7 +30,10 @@ from pandas.util._decorators import doc from pandas.util._validators import validate_fillna_kwargs -from pandas.core.dtypes.cast import can_hold_element +from pandas.core.dtypes.cast import ( + can_hold_element, + infer_dtype_from_scalar, +) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, @@ -1624,13 +1627,21 @@ def _reduce( pa_result = self._reduce_pyarrow(name, skipna=skipna, **kwargs) if keepdims: - result = pa.array([pa_result.as_py()], type=pa_result.type) + if isinstance(pa_result, pa.Scalar): + result = pa.array([pa_result.as_py()], type=pa_result.type) + else: + result = pa.array( + [pa_result], + type=to_pyarrow_type(infer_dtype_from_scalar(pa_result)[0]), + ) return type(self)(result) if pc.is_null(pa_result).as_py(): return self.dtype.na_value - else: + elif isinstance(pa_result, pa.Scalar): return pa_result.as_py() + else: + return pa_result def _explode(self): """ diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index e66557f132c1d..77f64b18a82f8 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1056,6 +1056,19 @@ def test_idxmax_numeric_only(self, numeric_only): expected = Series([1, 0, 1], index=["a", "b", "c"]) tm.assert_series_equal(result, expected) + def test_idxmax_arrow_types(self): + # GH#55368 + pytest.importorskip("pyarrow") + + df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1]}, dtype="int64[pyarrow]") + result = df.idxmax() + expected = Series([1, 0], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([2, 1], index=["a", "b"]) + tm.assert_series_equal(result, expected) + def test_idxmax_axis_2(self, float_frame): frame = float_frame msg = "No axis named 2 for object type DataFrame"
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55368
2023-10-03T09:45:53Z
2023-10-03T17:48:35Z
2023-10-03T17:48:35Z
2023-10-03T17:54:46Z
BUG: all not ignoring na when skipna=True
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index df19b01610a7a..87b71eac76d1a 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -29,6 +29,7 @@ Bug fixes - Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`) - Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`) - Fixed bug in :meth:`Index.insert` raising when inserting ``None`` into :class:`Index` with ``dtype="string[pyarrow_numpy]"`` (:issue:`55365`) +- Fixed bug in :meth:`Series.all` and :meth:`Series.any` not treating missing values correctly for ``dtype="string[pyarrow_numpy]"`` (:issue:`55367`) - Fixed bug in :meth:`Series.rank` for ``string[pyarrow_numpy]`` dtype (:issue:`55362`) - Silence ``Period[B]`` warnings introduced by :issue:`53446` during normal plotting activity (:issue:`55138`) - diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index db11e74951ee5..24b99b5d4852e 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -631,9 +631,11 @@ def _reduce( self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs ): if name in ["any", "all"]: - arr = pc.and_kleene( - pc.invert(pc.is_null(self._pa_array)), pc.not_equal(self._pa_array, "") - ) + if not skipna and name == "all": + nas = pc.invert(pc.is_null(self._pa_array)) + arr = pc.and_kleene(nas, pc.not_equal(self._pa_array, "")) + else: + arr = pc.not_equal(self._pa_array, "") return ArrowExtensionArray(arr)._reduce( name, skipna=skipna, keepdims=keepdims, **kwargs ) diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index 021252500e814..560b2377ada70 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -1087,7 +1087,8 @@ def test_any_all_pyarrow_string(self): ser = Series([None, "a"], dtype="string[pyarrow_numpy]") assert ser.any() - assert not ser.all() + assert ser.all() + assert not ser.all(skipna=False) ser = Series([None, ""], dtype="string[pyarrow_numpy]") assert not ser.any()
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
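A minimal sketch of the corrected Kleene semantics, assuming pyarrow is installed; the values come from the updated test in the diff above:

```python
import pandas as pd

ser = pd.Series([None, "a"], dtype="string[pyarrow_numpy]")
print(ser.any())              # True: "a" is truthy
print(ser.all())              # True: the missing value is skipped by default
print(ser.all(skipna=False))  # False: NA propagates when skipna=False
```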
https://api.github.com/repos/pandas-dev/pandas/pulls/55367
2023-10-03T09:45:14Z
2023-10-04T17:07:26Z
2023-10-04T17:07:26Z
2023-10-04T19:58:48Z
BUG: Inserting ndim=0 array does not infer string dtype
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 38ef8c8455b9d..e89a268c4256e 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -23,6 +23,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ - Fixed bug in :meth:`Categorical.equals` if other has arrow backed string dtype (:issue:`55364`) +- Fixed bug in :meth:`DataFrame.__setitem__` not inferring string dtype for zero-dimensional array with ``infer_string=True`` (:issue:`55366`) - Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) - Fixed bug in :meth:`DataFrame.interpolate` raising incorrect error message (:issue:`55347`) - Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index aaac0dc73486f..e661d590ab330 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -562,7 +562,12 @@ def sanitize_array( if not is_list_like(data): if index is None: raise ValueError("index must be specified when data is not list-like") + if isinstance(data, str) and using_pyarrow_string_dtype(): + from pandas.core.arrays.string_ import StringDtype + + dtype = StringDtype("pyarrow_numpy") data = construct_1d_arraylike_from_scalar(data, len(index), dtype) + return data elif isinstance(data, ABCExtensionArray): diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 370cbf0f33174..6d4eedb49ff83 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1905,6 +1905,19 @@ def test_adding_new_conditional_column() -> None: tm.assert_frame_equal(df, expected) +def test_add_new_column_infer_string(): + # GH#55366 + pytest.importorskip("pyarrow") + df = DataFrame({"x": [1]}) + with pd.option_context("future.infer_string", True): + df.loc[df["x"] == 1, "y"] = "1" + expected = DataFrame( + {"x": [1], "y": Series(["1"], dtype="string[pyarrow_numpy]")}, + columns=Index(["x", "y"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(df, expected) + + class TestSetitemValidation: # This is adapted from pandas/tests/arrays/masked/test_indexing.py # but checks for warnings instead of errors.
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
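A minimal sketch of the conditional-assignment case the fix covers, assuming pyarrow is installed and mirroring the test added above:

```python
import pandas as pd

df = pd.DataFrame({"x": [1]})
with pd.option_context("future.infer_string", True):
    # Assigning a scalar string to a new column goes through the
    # zero-dimensional/scalar construction path fixed in sanitize_array.
    df.loc[df["x"] == 1, "y"] = "1"
print(df["y"].dtype)  # string[pyarrow_numpy] instead of object
```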
https://api.github.com/repos/pandas-dev/pandas/pulls/55366
2023-10-03T09:43:58Z
2023-10-04T08:39:20Z
2023-10-04T08:39:20Z
2023-10-04T08:39:23Z
BUG: Index.insert raising when inserting None into new string dtype
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 8aba2bcbecf2f..0e2cfadf7b097 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -24,6 +24,7 @@ Bug fixes ~~~~~~~~~ - Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`) - Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`) +- Fixed bug in :meth:`Index.insert` raising when inserting ``None`` into :class:`Index` with ``dtype="string[pyarrow_numpy]"`` (:issue:`55365`) - Silence ``Period[B]`` warnings introduced by :issue:`53446` during normal plotting activity (:issue:`55138`) - diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 6262055827428..e904123849821 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -613,3 +613,8 @@ def _reduce( ) else: return super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs) + + def insert(self, loc: int, item) -> ArrowStringArrayNumpySemantics: + if item is np.nan: + item = libmissing.NA + return super().insert(loc, item) # type: ignore[return-value] diff --git a/pandas/tests/indexes/base_class/test_reshape.py b/pandas/tests/indexes/base_class/test_reshape.py index 5ecb2c753644d..6586f5f9de480 100644 --- a/pandas/tests/indexes/base_class/test_reshape.py +++ b/pandas/tests/indexes/base_class/test_reshape.py @@ -54,6 +54,14 @@ def test_insert_datetime_into_object(self, loc, val): tm.assert_index_equal(result, expected) assert type(expected[2]) is type(val) + def test_insert_none_into_string_numpy(self): + # GH#55365 + pytest.importorskip("pyarrow") + index = Index(["a", "b", "c"], dtype="string[pyarrow_numpy]") + result = index.insert(-1, None) + expected = Index(["a", "b", None, "c"], dtype="string[pyarrow_numpy]") + tm.assert_index_equal(result, expected) + @pytest.mark.parametrize( "pos,expected", [
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
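A minimal sketch of the call that previously raised, assuming pyarrow is installed; it mirrors the test added in the diff above:

```python
import pandas as pd

idx = pd.Index(["a", "b", "c"], dtype="string[pyarrow_numpy]")
# Inserting None previously raised; it now inserts a missing value
# before the last element and preserves the string dtype.
result = idx.insert(-1, None)
print(result.dtype)  # string[pyarrow_numpy]
```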
https://api.github.com/repos/pandas-dev/pandas/pulls/55365
2023-10-03T09:42:37Z
2023-10-03T20:02:36Z
2023-10-03T20:02:36Z
2023-10-03T20:09:31Z
BUG: eq not implemented for categorical and arrow backed strings
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index bd1b301ccb119..8371e02a3372c 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -22,6 +22,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ +- Fixed bug in :meth:`Categorical.equals` if other has arrow backed string dtype (:issue:`55364`) - Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) - Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`) - Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`) diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 2f1f80079e925..2c788411eb089 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -35,6 +35,7 @@ infer_dtype_from_scalar, ) from pandas.core.dtypes.common import ( + CategoricalDtype, is_array_like, is_bool_dtype, is_integer, @@ -631,7 +632,9 @@ def __setstate__(self, state) -> None: def _cmp_method(self, other, op): pc_func = ARROW_CMP_FUNCS[op.__name__] - if isinstance(other, (ArrowExtensionArray, np.ndarray, list, BaseMaskedArray)): + if isinstance( + other, (ArrowExtensionArray, np.ndarray, list, BaseMaskedArray) + ) or isinstance(getattr(other, "dtype", None), CategoricalDtype): result = pc_func(self._pa_array, self._box_pa(other)) elif is_scalar(other): try: diff --git a/pandas/tests/indexes/categorical/test_equals.py b/pandas/tests/indexes/categorical/test_equals.py index 1ed8f3a903439..a8353f301a3c3 100644 --- a/pandas/tests/indexes/categorical/test_equals.py +++ b/pandas/tests/indexes/categorical/test_equals.py @@ -88,3 +88,9 @@ def test_equals_multiindex(self): ci = mi.to_flat_index().astype("category") assert not ci.equals(mi) + + def test_equals_string_dtype(self, any_string_dtype): + # GH#55364 + idx = CategoricalIndex(list("abc"), name="B") + other = Index(["a", "b", "c"], name="B", dtype=any_string_dtype) + assert idx.equals(other)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
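A minimal sketch of the comparison this fix enables, assuming pyarrow is installed; per the test above, equality now works for any string dtype:

```python
import pandas as pd

ci = pd.CategoricalIndex(["a", "b", "c"])
other = pd.Index(["a", "b", "c"], dtype="string[pyarrow]")
# Comparing against an arrow-backed string index previously raised;
# the categorical operand is now boxed and compared elementwise.
print(ci.equals(other))  # True
```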
https://api.github.com/repos/pandas-dev/pandas/pulls/55364
2023-10-03T09:42:01Z
2023-10-03T20:01:35Z
2023-10-03T20:01:35Z
2023-10-03T20:01:39Z
BUG: ndim of string block incorrect with string inference
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 6f30bc650aa36..d6aeda3d418ed 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -383,7 +383,7 @@ def ndarray_to_mgr( new_block( dtype.construct_array_type()._from_sequence(data, dtype=dtype), BlockPlacement(slice(i, i + 1)), - ndim=1, + ndim=2, ) for i, data in enumerate(obj_columns) ] diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index fd851ab244cb8..4b41050c86467 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2743,6 +2743,13 @@ def test_frame_string_inference_array_string_dtype(self): df = DataFrame(np.array([["a", "c"], ["b", "d"]]), columns=["a", "b"]) tm.assert_frame_equal(df, expected) + def test_frame_string_inference_block_dim(self): + # GH#55363 + pytest.importorskip("pyarrow") + with pd.option_context("future.infer_string", True): + df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]])) + assert df._mgr.blocks[0].ndim == 2 + class TestDataFrameConstructorIndexInference: def test_frame_from_dict_of_series_overlapping_monthly_period_indexes(self):
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55363
2023-10-03T09:41:05Z
2023-10-03T16:54:15Z
2023-10-03T16:54:15Z
2023-10-03T17:48:47Z
BUG: rank raising for arrow string dtypes
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 38ef8c8455b9d..6f537ad7256f5 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -28,6 +28,7 @@ Bug fixes - Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`) - Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`) - Fixed bug in :meth:`Index.insert` raising when inserting ``None`` into :class:`Index` with ``dtype="string[pyarrow_numpy]"`` (:issue:`55365`) +- Fixed bug in :meth:`Series.rank` for ``string[pyarrow_numpy]`` dtype (:issue:`55362`) - Silence ``Period[B]`` warnings introduced by :issue:`53446` during normal plotting activity (:issue:`55138`) - diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 2c788411eb089..12fe9b30f3f52 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -1747,7 +1747,7 @@ def __setitem__(self, key, value) -> None: data = pa.chunked_array([data]) self._pa_array = data - def _rank( + def _rank_calc( self, *, axis: AxisInt = 0, @@ -1756,9 +1756,6 @@ def _rank( ascending: bool = True, pct: bool = False, ): - """ - See Series.rank.__doc__. - """ if pa_version_under9p0 or axis != 0: ranked = super()._rank( axis=axis, @@ -1773,7 +1770,7 @@ def _rank( else: pa_type = pa.uint64() result = pa.array(ranked, type=pa_type, from_pandas=True) - return type(self)(result) + return result data = self._pa_array.combine_chunks() sort_keys = "ascending" if ascending else "descending" @@ -1812,7 +1809,29 @@ def _rank( divisor = pc.count(result) result = pc.divide(result, divisor) - return type(self)(result) + return result + + def _rank( + self, + *, + axis: AxisInt = 0, + method: str = "average", + na_option: str = "keep", + ascending: bool = True, + pct: bool = False, + ): + """ + See Series.rank.__doc__. + """ + return type(self)( + self._rank_calc( + axis=axis, + method=method, + na_option=na_option, + ascending=ascending, + pct=pct, + ) + ) def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self: """ diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index e904123849821..db11e74951ee5 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -53,6 +53,7 @@ from collections.abc import Sequence from pandas._typing import ( + AxisInt, Dtype, Scalar, npt, @@ -501,6 +502,28 @@ def _str_find(self, sub: str, start: int = 0, end: int | None = None): def _convert_int_dtype(self, result): return Int64Dtype().__from_arrow__(result) + def _rank( + self, + *, + axis: AxisInt = 0, + method: str = "average", + na_option: str = "keep", + ascending: bool = True, + pct: bool = False, + ): + """ + See Series.rank.__doc__. 
+ """ + return self._convert_int_dtype( + self._rank_calc( + axis=axis, + method=method, + na_option=na_option, + ascending=ascending, + pct=pct, + ) + ) + class ArrowStringArrayNumpySemantics(ArrowStringArray): _storage = "pyarrow_numpy" @@ -584,7 +607,10 @@ def _str_map( return lib.map_infer_mask(arr, f, mask.view("uint8")) def _convert_int_dtype(self, result): - result = result.to_numpy() + if isinstance(result, pa.Array): + result = result.to_numpy(zero_copy_only=False) + else: + result = result.to_numpy() if result.dtype == np.int32: result = result.astype(np.int64) return result diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py index 8b451c84dc5da..b5b5e42691e59 100644 --- a/pandas/tests/frame/methods/test_rank.py +++ b/pandas/tests/frame/methods/test_rank.py @@ -488,3 +488,15 @@ def test_rank_mixed_axis_zero(self, data, expected): df.rank() result = df.rank(numeric_only=True) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, exp_dtype", + [("string[pyarrow]", "Int64"), ("string[pyarrow_numpy]", "float64")], + ) + def test_rank_string_dtype(self, dtype, exp_dtype): + # GH#55362 + pytest.importorskip("pyarrow") + obj = Series(["foo", "foo", None, "foo"], dtype=dtype) + result = obj.rank(method="first") + expected = Series([1, 2, None, 3], dtype=exp_dtype) + tm.assert_series_equal(result, expected)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
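A minimal sketch of the ranking behavior after the fix, assuming pyarrow is installed; the values and expected dtypes mirror the parametrized test above:

```python
import pandas as pd

ser = pd.Series(["foo", "foo", None, "foo"], dtype="string[pyarrow]")
# rank previously raised for arrow string dtypes; integer ranks now come
# back as Int64 (float64 for string[pyarrow_numpy]), with NA preserved.
print(ser.rank(method="first"))  # 1, 2, <NA>, 3
```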
https://api.github.com/repos/pandas-dev/pandas/pulls/55362
2023-10-03T09:40:11Z
2023-10-04T08:39:38Z
2023-10-04T08:39:38Z
2023-10-04T19:57:21Z
Fix typo of extra backtick
diff --git a/doc/source/user_guide/10min.rst b/doc/source/user_guide/10min.rst index 5def84b91705c..2c612e31d33b6 100644 --- a/doc/source/user_guide/10min.rst +++ b/doc/source/user_guide/10min.rst @@ -451,7 +451,7 @@ Merge Concat ~~~~~~ -pandas provides various facilities for easily combining together :class:`Series`` and +pandas provides various facilities for easily combining together :class:`Series` and :class:`DataFrame` objects with various kinds of set logic for the indexes and relational algebra functionality in the case of join / merge-type operations.
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55361
2023-10-03T09:24:57Z
2023-10-03T16:55:34Z
2023-10-03T16:55:34Z
2023-10-03T16:55:42Z
Update algorithms.py
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c952178f4c998..96a0ab28f7072 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -936,9 +936,12 @@ def value_counts_internal( idx.name = index_name result = Series(counts, index=idx, name=name, copy=False) - + # Changed sort behavior to account for the bug and possible future features if sort: - result = result.sort_values(ascending=ascending) + if ascending: + result = result.sort_index() + else: + result = result.sort_index(ascending=False) if normalize: result = result / counts.sum()
Added extra behavior to the sort logic so that the output follows the order of the original list. - [x] closes #55224 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
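An illustration of the semantic change this patch proposes (note the record shows it was not merged): sorting by index orders the result by the observed values, while the documented default orders by count:

```python
import pandas as pd

ser = pd.Series(["b", "a", "b", "c"])
print(ser.value_counts())               # by count: b=2, then a=1, c=1
print(ser.value_counts().sort_index())  # by value: a, b, c
```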
https://api.github.com/repos/pandas-dev/pandas/pulls/55360
2023-10-03T01:59:59Z
2023-10-03T16:58:11Z
null
2023-10-03T16:58:11Z
CLN: assorted
diff --git a/doc/redirects.csv b/doc/redirects.csv index 97cd20b295e65..bd60cc6a732bd 100644 --- a/doc/redirects.csv +++ b/doc/redirects.csv @@ -127,7 +127,6 @@ generated/pandas.api.types.is_number,../reference/api/pandas.api.types.is_number generated/pandas.api.types.is_numeric_dtype,../reference/api/pandas.api.types.is_numeric_dtype generated/pandas.api.types.is_object_dtype,../reference/api/pandas.api.types.is_object_dtype generated/pandas.api.types.is_period_dtype,../reference/api/pandas.api.types.is_period_dtype -generated/pandas.api.types.is_period,../reference/api/pandas.api.types.is_period generated/pandas.api.types.is_re_compilable,../reference/api/pandas.api.types.is_re_compilable generated/pandas.api.types.is_re,../reference/api/pandas.api.types.is_re generated/pandas.api.types.is_scalar,../reference/api/pandas.api.types.is_scalar diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst index 41f4b4d5783ea..e0aa8be066914 100644 --- a/doc/source/development/contributing_codebase.rst +++ b/doc/source/development/contributing_codebase.rst @@ -528,7 +528,7 @@ If a test is known to fail but the manner in which it fails is not meant to be captured, use ``pytest.mark.xfail`` It is common to use this method for a test that exhibits buggy behavior or a non-implemented feature. If the failing test has flaky behavior, use the argument ``strict=False``. This -will make it so pytest does not fail if the test happens to pass. +will make it so pytest does not fail if the test happens to pass. Using ``strict=False`` is highly undesirable, please use it only as a last resort. Prefer the decorator ``@pytest.mark.xfail`` and the argument ``pytest.param`` over usage within a test so that the test is appropriately marked during the diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 177c105688d0c..31c143ee012bb 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1729,6 +1729,17 @@ def transpose(self, *axes: int) -> ExtensionArray: Because ExtensionArrays are always 1D, this is a no-op. It is included for compatibility with np.ndarray. + + Returns + ------- + ExtensionArray + + Examples + -------- + >>> pd.array([1, 2, 3]).transpose() + <IntegerArray> + [1, 2, 3] + Length: 3, dtype: Int64 """ return self[:] diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 11f2cc8ebf1ff..cb22ad456c9b5 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3501,7 +3501,7 @@ def _intersection(self, other: Index, sort: bool = False): pass else: # TODO: algos.unique1d should preserve DTA/TDA - if is_numeric_dtype(self): + if is_numeric_dtype(self.dtype): # This is faster, because Index.unique() checks for uniqueness # before calculating the unique values. res = algos.unique1d(res_indexer) @@ -5020,7 +5020,10 @@ def _can_use_libjoin(self) -> bool: ) # Exclude index types where the conversion to numpy converts to object dtype, # which negates the performance benefit of libjoin - # TODO: exclude RangeIndex? Seems to break test_concat_datetime_timezone + # Subclasses should override to return False if _get_join_target is + # not zero-copy. + # TODO: exclude RangeIndex (which allocates memory)? + # Doing so seems to break test_concat_datetime_timezone return not isinstance(self, (ABCIntervalIndex, ABCMultiIndex)) # -------------------------------------------------------------------- @@ -6176,8 +6179,8 @@ def _get_indexer_non_comparable( If doing an inequality check, i.e. 
method is not None. """ if method is not None: - other = _unpack_nested_dtype(target) - raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}") + other_dtype = _unpack_nested_dtype(target) + raise TypeError(f"Cannot compare dtypes {self.dtype} and {other_dtype}") no_matches = -1 * np.ones(target.shape, dtype=np.intp) if unique: @@ -6288,8 +6291,7 @@ def _should_compare(self, other: Index) -> bool: # respectively. return False - other = _unpack_nested_dtype(other) - dtype = other.dtype + dtype = _unpack_nested_dtype(other) return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: @@ -7592,7 +7594,7 @@ def get_unanimous_names(*indexes: Index) -> tuple[Hashable, ...]: return names -def _unpack_nested_dtype(other: Index) -> Index: +def _unpack_nested_dtype(other: Index) -> DtypeObj: """ When checking if our dtype is comparable with another, we need to unpack CategoricalDtype to look at its categories.dtype. @@ -7603,20 +7605,20 @@ def _unpack_nested_dtype(other: Index) -> Index: Returns ------- - Index + np.dtype or ExtensionDtype """ dtype = other.dtype if isinstance(dtype, CategoricalDtype): # If there is ever a SparseIndex, this could get dispatched # here too. - return dtype.categories + return dtype.categories.dtype elif isinstance(dtype, ArrowDtype): # GH 53617 import pyarrow as pa if pa.types.is_dictionary(dtype.pyarrow_dtype): - other = other.astype(ArrowDtype(dtype.pyarrow_dtype.value_type)) - return other + other = other[:0].astype(ArrowDtype(dtype.pyarrow_dtype.value_type)) + return other.dtype def _maybe_try_sort(result: Index | ArrayLike, sort: bool | None): diff --git a/pandas/core/series.py b/pandas/core/series.py index a5e692431e890..417a2515a33f0 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4004,7 +4004,8 @@ def argsort( if mask.any(): # TODO(3.0): once this deprecation is enforced we can call - # self.array.argsort directly, which will close GH#43840 + # self.array.argsort directly, which will close GH#43840 and + # GH#12694 warnings.warn( "The behavior of Series.argsort in the presence of NA values is " "deprecated. 
In a future version, NA values will be ordered " diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index d96fc02e16d0d..1b1d9d7640058 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -88,7 +88,7 @@ def get_indexer_indexer( # error: Incompatible types in assignment (expression has type # "Union[ExtensionArray, ndarray[Any, Any], Index, Series]", variable has # type "Index") - target = ensure_key_mapped(target, key, levels=level) # type:ignore[assignment] + target = ensure_key_mapped(target, key, levels=level) # type: ignore[assignment] target = target._sort_levels_monotonic() if level is not None: diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 59e0ca0591ced..2eee506e1feb7 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -2166,6 +2166,19 @@ def test_loc_setitem_with_expansion_preserves_nullable_int(self, dtype): result.loc[df.index, "data"] = ser._values tm.assert_frame_equal(result, df) + def test_loc_setitem_ea_not_full_column(self): + # GH#39163 + df = DataFrame({"A": range(5)}) + + val = date_range("2016-01-01", periods=3, tz="US/Pacific") + + df.loc[[0, 1, 2], "B"] = val + + bex = val.append(DatetimeIndex([pd.NaT, pd.NaT], dtype=val.dtype)) + expected = DataFrame({"A": range(5), "B": bex}) + assert expected.dtypes["B"] == val.dtype + tm.assert_frame_equal(df, expected) + class TestLocCallable: def test_frame_loc_getitem_callable(self): diff --git a/pandas/tests/tslibs/test_npy_units.py b/pandas/tests/tslibs/test_npy_units.py new file mode 100644 index 0000000000000..6d05dc79fbb2c --- /dev/null +++ b/pandas/tests/tslibs/test_npy_units.py @@ -0,0 +1,27 @@ +import numpy as np + +from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit +from pandas._libs.tslibs.vectorized import is_date_array_normalized + +# a datetime64 ndarray which *is* normalized +day_arr = np.arange(10, dtype="i8").view("M8[D]") + + +class TestIsDateArrayNormalized: + def test_is_date_array_normalized_day(self): + arr = day_arr + abbrev = "D" + unit = abbrev_to_npy_unit(abbrev) + result = is_date_array_normalized(arr.view("i8"), None, unit) + assert result is True + + def test_is_date_array_normalized_seconds(self): + abbrev = "s" + arr = day_arr.astype(f"M8[{abbrev}]") + unit = abbrev_to_npy_unit(abbrev) + result = is_date_array_normalized(arr.view("i8"), None, unit) + assert result is True + + arr[0] += np.timedelta64(1, abbrev) + result2 = is_date_array_normalized(arr.view("i8"), None, unit) + assert result2 is False
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
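A minimal illustration of the ``strict=False`` escape hatch that the contributing-guide change in this diff discourages; with it, pytest tolerates an xfail-marked test that happens to pass instead of failing the run:

```python
import random

import pytest


@pytest.mark.xfail(strict=False, reason="flaky: outcome is nondeterministic")
def test_flaky():
    # Passes roughly half the time; strict=False tolerates the XPASS case.
    assert random.random() < 0.5
```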
https://api.github.com/repos/pandas-dev/pandas/pulls/55359
2023-10-02T20:58:40Z
2023-10-06T20:01:48Z
2023-10-06T20:01:48Z
2023-10-06T21:59:46Z
Backport PR #55350 on branch 2.1.x (Bump pypa/cibuildwheel from 2.16.0 to 2.16.1)
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index ecb30fefb9ff2..b156c3d9dfae7 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -138,7 +138,7 @@ jobs: run: echo "sdist_name=$(cd ./dist && ls -d */)" >> "$GITHUB_ENV" - name: Build wheels - uses: pypa/cibuildwheel@v2.16.0 + uses: pypa/cibuildwheel@v2.16.1 with: package-dir: ./dist/${{ matrix.buildplat[1] == 'macosx_*' && env.sdist_name || needs.build_sdist.outputs.sdist_file }} env:
Backport PR #55350: Bump pypa/cibuildwheel from 2.16.0 to 2.16.1
https://api.github.com/repos/pandas-dev/pandas/pulls/55358
2023-10-02T17:34:36Z
2023-10-02T20:54:25Z
2023-10-02T20:54:25Z
2023-10-02T20:54:25Z
Backport PR #55348 on branch 2.1.x (REGR: join segfaulting for arrow string with nulls)
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 6fec66ec8d556..158cb51f05316 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -14,7 +14,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed bug where PDEP-6 warning about setting an item of an incompatible dtype was being shown when creating a new conditional column (:issue:`55025`) -- +- Fixed regression in :meth:`DataFrame.join` where result has missing values and dtype is arrow backed string (:issue:`55348`) .. --------------------------------------------------------------------------- .. _whatsnew_212.bug_fixes: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index d36ceff800c56..74181f8dc853c 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -2442,6 +2442,8 @@ def _factorize_keys( .astype(np.intp, copy=False), len(dc.dictionary), ) + if dc.null_count > 0: + count += 1 if how == "right": return rlab, llab, count return llab, rlab, count diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py index 98f3926968ad0..3b6b86056f8c7 100644 --- a/pandas/tests/frame/methods/test_join.py +++ b/pandas/tests/frame/methods/test_join.py @@ -158,9 +158,14 @@ def test_join_invalid_validate(left_no_dup, right_no_dup): left_no_dup.merge(right_no_dup, on="a", validate="invalid") -def test_join_on_single_col_dup_on_right(left_no_dup, right_w_dups): +@pytest.mark.parametrize("dtype", ["object", "string[pyarrow]"]) +def test_join_on_single_col_dup_on_right(left_no_dup, right_w_dups, dtype): # GH 46622 # Dups on right allowed by one_to_many constraint + if dtype == "string[pyarrow]": + pytest.importorskip("pyarrow") + left_no_dup = left_no_dup.astype(dtype) + right_w_dups.index = right_w_dups.index.astype(dtype) left_no_dup.join( right_w_dups, on="a",
Backport PR #55348: REGR: join segfaulting for arrow string with nulls
https://api.github.com/repos/pandas-dev/pandas/pulls/55357
2023-10-02T16:53:31Z
2023-10-02T20:54:09Z
2023-10-02T20:54:09Z
2023-10-02T20:54:09Z
[pre-commit.ci] pre-commit autoupdate
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b0b511e1048c6..c911edfa03670 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -84,7 +84,7 @@ repos: '--filter=-readability/casting,-runtime/int,-build/include_subdir,-readability/fn_size' ] - repo: https://github.com/pylint-dev/pylint - rev: v3.0.0a7 + rev: v3.0.0b0 hooks: - id: pylint stages: [manual]
<!--pre-commit.ci start--> updates: - [github.com/pylint-dev/pylint: v3.0.0a7 → v3.0.0b0](https://github.com/pylint-dev/pylint/compare/v3.0.0a7...v3.0.0b0) <!--pre-commit.ci end-->
https://api.github.com/repos/pandas-dev/pandas/pulls/55356
2023-10-02T16:44:19Z
2023-10-02T17:35:25Z
2023-10-02T17:35:25Z
2023-10-02T17:35:28Z
DEPR: create_block_manager_from_blocks
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index 284f8ef135d99..0f8cb9f053174 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -16,7 +16,6 @@ from pandas.core.internals.managers import ( BlockManager, SingleBlockManager, - create_block_manager_from_blocks, ) __all__ = [ @@ -31,8 +30,6 @@ "SingleBlockManager", "SingleArrayManager", "concatenate_managers", - # this is preserved here for downstream compatibility (GH-33892) - "create_block_manager_from_blocks", ] @@ -41,6 +38,18 @@ def __getattr__(name: str): from pandas.util._exceptions import find_stack_level + if name == "create_block_manager_from_blocks": + # GH#33892 + warnings.warn( + f"{name} is deprecated and will be removed in a future version. " + "Use public APIs instead.", + DeprecationWarning, + stacklevel=find_stack_level(), + ) + from pandas.core.internals.managers import create_block_manager_from_blocks + + return create_block_manager_from_blocks + if name in ["NumericBlock", "ObjectBlock"]: warnings.warn( f"{name} is deprecated and will be removed in a future version. " diff --git a/pandas/tests/internals/test_api.py b/pandas/tests/internals/test_api.py index 5cd6c718260ea..ffc672cc748be 100644 --- a/pandas/tests/internals/test_api.py +++ b/pandas/tests/internals/test_api.py @@ -4,6 +4,7 @@ """ import pandas as pd +import pandas._testing as tm from pandas.core import internals from pandas.core.internals import api @@ -37,7 +38,6 @@ def test_namespace(): "SingleBlockManager", "SingleArrayManager", "concatenate_managers", - "create_block_manager_from_blocks", ] result = [x for x in dir(internals) if not x.startswith("__")] @@ -51,3 +51,15 @@ def test_make_block_2d_with_dti(): assert blk.shape == (1, 3) assert blk.values.shape == (1, 3) + + +def test_create_block_manager_from_blocks_deprecated(): + # GH#33892 + # If they must, downstream packages should get this from internals.api, + # not internals. + msg = ( + "create_block_manager_from_blocks is deprecated and will be " + "removed in a future version. Use public APIs instead" + ) + with tm.assert_produces_warning(DeprecationWarning, match=msg): + internals.create_block_manager_from_blocks
null
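A minimal sketch of the PEP 562 module-level ``__getattr__`` pattern this diff uses to deprecate an attribute; the attribute name and replacement target here are hypothetical stand-ins:

```python
import warnings


def _replacement():  # hypothetical stand-in for the deprecated object
    raise NotImplementedError


def __getattr__(name: str):
    # PEP 562: only called for names not found via normal module lookup,
    # so the warning fires exactly when the deprecated name is accessed.
    if name == "old_name":
        warnings.warn(
            f"{name} is deprecated and will be removed in a future version.",
            DeprecationWarning,
            stacklevel=2,
        )
        return _replacement
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```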
https://api.github.com/repos/pandas-dev/pandas/pulls/55355
2023-10-02T15:13:19Z
2023-10-17T21:19:49Z
2023-10-17T21:19:49Z
2023-10-17T22:26:18Z
DOC: Typo: missing space.
diff --git a/pandas/io/xml.py b/pandas/io/xml.py index 918fe4d22ea62..bd3b515dbca2f 100644 --- a/pandas/io/xml.py +++ b/pandas/io/xml.py @@ -88,7 +88,7 @@ class _XMLFrameParser: Parse only the attributes at the specified ``xpath``. names : list - Column names for :class:`~pandas.DataFrame`of parsed XML data. + Column names for :class:`~pandas.DataFrame` of parsed XML data. dtype : dict Data type for data or columns. E.g. {{'a': np.float64,
Without a trailing space the interpreted text is not closed. Even if it were, we would want a space in the documentation. (Note: if one does not want a space between the closing backtick and the subsequent text, one must use a backslash-escaped space.)
https://api.github.com/repos/pandas-dev/pandas/pulls/55354
2023-10-02T12:39:08Z
2023-10-02T16:48:21Z
2023-10-02T16:48:21Z
2023-10-02T16:48:28Z
DOC: Typo: directive must have a space after `::`
diff --git a/pandas/core/resample.py b/pandas/core/resample.py index e9b2bacd9e1df..7d37a9f1d5113 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -986,7 +986,7 @@ def interpolate( downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. - .. deprecated::2.1.0 + .. deprecated:: 2.1.0 ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function.
Otherwise it may be interpreted as a comment.
https://api.github.com/repos/pandas-dev/pandas/pulls/55353
2023-10-02T12:34:58Z
2023-10-02T16:48:56Z
2023-10-02T16:48:56Z
2023-10-02T16:49:04Z
BUG: MultiIndex.get_indexer with method not raising for non-monotonic
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 9dc095e6de6ff..1bf18bfba4b84 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -310,7 +310,7 @@ Missing MultiIndex ^^^^^^^^^^ -- +- Bug in :meth:`MultiIndex.get_indexer` not raising ``ValueError`` when ``method`` provided and index is non-monotonic (:issue:`53452`) - I/O diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index e23887159c9c6..9017ff121976b 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4011,8 +4011,8 @@ def _get_fill_indexer( self, target: Index, method: str_t, limit: int | None = None, tolerance=None ) -> npt.NDArray[np.intp]: if self._is_multi: - # TODO: get_indexer_with_fill docstring says values must be _sorted_ - # but that doesn't appear to be enforced + if not (self.is_monotonic_increasing or self.is_monotonic_decreasing): + raise ValueError("index must be monotonic increasing or decreasing") # error: "IndexEngine" has no attribute "get_indexer_with_fill" engine = self._engine with warnings.catch_warnings(): diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 78b2c493ec116..d86692477f381 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -342,6 +342,19 @@ def test_get_indexer_methods(self): expected = np.array([4, 6, 7], dtype=pad_indexer.dtype) tm.assert_almost_equal(expected, pad_indexer) + @pytest.mark.parametrize("method", ["pad", "ffill", "backfill", "bfill", "nearest"]) + def test_get_indexer_methods_raise_for_non_monotonic(self, method): + # 53452 + mi = MultiIndex.from_arrays([[0, 4, 2], [0, 4, 2]]) + if method == "nearest": + err = NotImplementedError + msg = "not implemented yet for MultiIndex" + else: + err = ValueError + msg = "index must be monotonic increasing or decreasing" + with pytest.raises(err, match=msg): + mi.get_indexer([(1, 1)], method=method) + def test_get_indexer_three_or_more_levels(self): # https://github.com/pandas-dev/pandas/issues/29896 # tests get_indexer() on MultiIndexes with 3+ levels
- [x] closes #53452 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v2.2.0.rst` file if fixing a bug or adding a new feature.
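A minimal sketch of the new guard, mirroring the test above; the previously silent (and undocumented) requirement now raises:

```python
import pandas as pd

mi = pd.MultiIndex.from_arrays([[0, 4, 2], [0, 4, 2]])  # non-monotonic
try:
    mi.get_indexer([(1, 1)], method="ffill")
except ValueError as err:
    print(err)  # index must be monotonic increasing or decreasing
```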
https://api.github.com/repos/pandas-dev/pandas/pulls/55352
2023-10-02T11:20:43Z
2023-10-02T16:50:57Z
2023-10-02T16:50:57Z
2023-11-16T12:56:50Z
Bump pypa/cibuildwheel from 2.16.0 to 2.16.1
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 4c7a7b329777b..efa14dd966eb1 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -138,7 +138,7 @@ jobs: run: echo "sdist_name=$(cd ./dist && ls -d */)" >> "$GITHUB_ENV" - name: Build wheels - uses: pypa/cibuildwheel@v2.16.0 + uses: pypa/cibuildwheel@v2.16.1 with: package-dir: ./dist/${{ matrix.buildplat[1] == 'macosx_*' && env.sdist_name || needs.build_sdist.outputs.sdist_file }} env:
Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.16.0 to 2.16.1. <details> <summary>Release notes</summary> <p><em>Sourced from <a href="https://github.com/pypa/cibuildwheel/releases">pypa/cibuildwheel's releases</a>.</em></p> <blockquote> <h2>v2.16.1</h2> <ul> <li>🛠 Updates the prerelease CPython 3.12 version to 3.12.0rc3 (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1625">#1625</a>)</li> <li>🛠 Only calls <code>linux32</code> in containers when necessary (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1599">#1599</a>)</li> </ul> </blockquote> </details> <details> <summary>Changelog</summary> <p><em>Sourced from <a href="https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md">pypa/cibuildwheel's changelog</a>.</em></p> <blockquote> <h3>v2.16.1</h3> <p><em>26 September 2023</em></p> <ul> <li>🛠 Updates the prerelease CPython 3.12 version to 3.12.0rc3 (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1625">#1625</a>)</li> <li>🛠 Only calls <code>linux32</code> in containers when necessary (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1599">#1599</a>)</li> </ul> </blockquote> </details> <details> <summary>Commits</summary> <ul> <li><a href="https://github.com/pypa/cibuildwheel/commit/7da7df1efc530f07d1945c00934b8cfd34be0d50"><code>7da7df1</code></a> Bump version: v2.16.1</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/9deb1b6794913450fc222b73e2ada1a0eec00487"><code>9deb1b6</code></a> Merge pull request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1625">#1625</a> from pypa/update-dependencies-pr</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/271c5fe6e461766c93e9c45d29923ca93ed751e8"><code>271c5fe</code></a> [pre-commit.ci] pre-commit autoupdate (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1627">#1627</a>)</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/c716cfaeb8bb52556017300e35cbd7044a614804"><code>c716cfa</code></a> Update dependencies</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/099d397aee3ec4bead60ae0c8a554d77b156c86c"><code>099d397</code></a> Merge pull request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1599">#1599</a> from mayeut/manylinux-entrypoint</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/72222654f48f70f792df979f05e0c1dea470ae35"><code>7222265</code></a> [pre-commit.ci] pre-commit autoupdate (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1619">#1619</a>)</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/7a8b8012ffe79448b50852f8b7c327292ed61f48"><code>7a8b801</code></a> clearer simulate_32_bit initialization</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/0ccf1dc016d1bb88e118ce6549475e82b21217bd"><code>0ccf1dc</code></a> remove GHA runner cached docker images</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/ba11212a135469b1b3c878756cba85d5258ba9c9"><code>ba11212</code></a> use fixture in oci_container_test.py to clean-up images after tests</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/6d0890e7e04f37bf026d12e72031430f4f85b06c"><code>6d0890e</code></a> add tests</li> <li>Additional commits viewable in <a href="https://github.com/pypa/cibuildwheel/compare/v2.16.0...v2.16.1">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility 
score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pypa/cibuildwheel&package-manager=github_actions&previous-version=2.16.0&new-version=2.16.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) </details>
https://api.github.com/repos/pandas-dev/pandas/pulls/55350
2023-10-02T08:42:33Z
2023-10-02T17:34:27Z
2023-10-02T17:34:27Z
2023-10-02T17:34:38Z
Update .devcontainer.json
diff --git a/.devcontainer.json b/.devcontainer.json index 7c5d009260c64..609ded1a96d9a 100644 --- a/.devcontainer.json +++ b/.devcontainer.json @@ -7,23 +7,23 @@ // Use 'settings' to set *default* container specific settings.json values on container create. // You can edit these settings after create using File > Preferences > Settings > Remote. - "settings": { - "terminal.integrated.shell.linux": "/bin/bash", - "python.pythonPath": "/usr/local/bin/python", - "python.formatting.provider": "black", - "python.linting.enabled": true, - "python.linting.flake8Enabled": true, - "python.linting.pylintEnabled": false, - "python.linting.mypyEnabled": true, - "python.testing.pytestEnabled": true, - "python.testing.pytestArgs": [ - "pandas" - ] - }, - - // Add the IDs of extensions you want installed when the container is created in the array below. - "extensions": [ - "ms-python.python", - "ms-vscode.cpptools" - ] + "customizations": { + "vscode": { + "terminal.integrated.shell.linux": "/bin/bash", + "python.pythonPath": "/usr/local/bin/python", + "python.formatting.provider": "black", + "python.linting.enabled": true, + "python.linting.flake8Enabled": true, + "python.linting.pylintEnabled": false, + "python.linting.mypyEnabled": true, + "python.testing.pytestEnabled": true, + "python.testing.pytestArgs": [ + "pandas" + ], + "extensions": [ + "ms-python.python", + "ms-vscode.cpptools" + ] + } + } }
Update per new VS Code conventions: changes "settings" to "customizations: vscode" and moves "extensions" under "customizations: vscode".
https://api.github.com/repos/pandas-dev/pandas/pulls/55349
2023-10-02T04:00:43Z
2023-11-27T18:48:37Z
null
2023-11-27T18:48:38Z
REGR: join segfaulting for arrow string with nulls
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 1a25b848e0f84..a5ba365f2d456 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -15,7 +15,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.MonthBegin` (:issue:`55271`) - Fixed bug where PDEP-6 warning about setting an item of an incompatible dtype was being shown when creating a new conditional column (:issue:`55025`) -- +- Fixed regression in :meth:`DataFrame.join` where result has missing values and dtype is arrow backed string (:issue:`55348`) .. --------------------------------------------------------------------------- .. _whatsnew_212.bug_fixes: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 4b9fcc80af4bb..ba6579a739f54 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -2443,6 +2443,8 @@ def _factorize_keys( .astype(np.intp, copy=False), len(dc.dictionary), ) + if dc.null_count > 0: + count += 1 if how == "right": return rlab, llab, count return llab, rlab, count diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py index 2d4ac1d4a4444..3d21faf8b1729 100644 --- a/pandas/tests/frame/methods/test_join.py +++ b/pandas/tests/frame/methods/test_join.py @@ -158,9 +158,14 @@ def test_join_invalid_validate(left_no_dup, right_no_dup): left_no_dup.merge(right_no_dup, on="a", validate="invalid") -def test_join_on_single_col_dup_on_right(left_no_dup, right_w_dups): +@pytest.mark.parametrize("dtype", ["object", "string[pyarrow]"]) +def test_join_on_single_col_dup_on_right(left_no_dup, right_w_dups, dtype): # GH 46622 # Dups on right allowed by one_to_many constraint + if dtype == "string[pyarrow]": + pytest.importorskip("pyarrow") + left_no_dup = left_no_dup.astype(dtype) + right_w_dups.index = right_w_dups.index.astype(dtype) left_no_dup.join( right_w_dups, on="a",
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
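A minimal sketch of the kind of join that could previously segfault: a pyarrow-backed string key containing nulls goes through the dictionary-encoded ``_factorize_keys`` path, which under-counted groups when nulls were present. The frames here are illustrative, not from the PR:

```python
import pandas as pd

left = pd.DataFrame({"a": pd.array(["x", "y", None], dtype="string[pyarrow]")})
right = pd.DataFrame(
    {"b": [1, 2]}, index=pd.Index(["x", "z"], dtype="string[pyarrow]")
)
print(left.join(right, on="a"))  # unmatched keys and nulls now map to NaN safely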
https://api.github.com/repos/pandas-dev/pandas/pulls/55348
2023-10-01T21:59:08Z
2023-10-02T16:52:26Z
2023-10-02T16:52:26Z
2023-10-02T21:28:43Z
BUG: interpolate raising wrong error for ea
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index bd1b301ccb119..28889398a3dd7 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -23,6 +23,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ - Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) +- Fixed bug in :meth:`DataFrame.interpolate` raising incorrect error message (:issue:`55347`) - Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`) - Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`) - Silence ``Period[B]`` warnings introduced by :issue:`53446` during normal plotting activity (:issue:`55138`) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index c06bf7366447b..177c105688d0c 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -893,7 +893,6 @@ def interpolate( limit, limit_direction, limit_area, - fill_value, copy: bool, **kwargs, ) -> Self: diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py index 291a79815a81c..67aa07dd83764 100644 --- a/pandas/tests/frame/methods/test_interpolate.py +++ b/pandas/tests/frame/methods/test_interpolate.py @@ -497,3 +497,9 @@ def test_interpolate_empty_df(self): result = df.interpolate(inplace=True) assert result is None tm.assert_frame_equal(df, expected) + + def test_interpolate_ea_raise(self): + # GH#55347 + df = DataFrame({"a": [1, None, 2]}, dtype="Int64") + with pytest.raises(NotImplementedError, match="does not implement"): + df.interpolate()
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
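A minimal sketch of the corrected error, mirroring the test above: interpolating a masked extension dtype now raises ``NotImplementedError`` instead of a misleading message:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, None, 2]}, dtype="Int64")
try:
    df.interpolate()
except NotImplementedError as err:
    print(err)  # message contains "does not implement"
```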
https://api.github.com/repos/pandas-dev/pandas/pulls/55347
2023-10-01T21:23:34Z
2023-10-03T20:02:00Z
2023-10-03T20:02:00Z
2023-10-09T20:06:52Z
BUG: Fix convert_dtypes for all na column and arrow backend
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 9dc095e6de6ff..b3671d3618791 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -281,7 +281,7 @@ Numeric Conversion ^^^^^^^^^^ -- +- Bug in :meth:`Series.convert_dtypes` not converting all NA column to ``null[pyarrow]`` (:issue:`55346`) - Strings diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 74e785be06356..3208a742738a3 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1133,7 +1133,16 @@ def convert_dtypes( base_dtype = np.dtype(str) else: base_dtype = inferred_dtype - pa_type = to_pyarrow_type(base_dtype) + if ( + base_dtype.kind == "O" # type: ignore[union-attr] + and len(input_array) > 0 + and isna(input_array).all() + ): + import pyarrow as pa + + pa_type = pa.null() + else: + pa_type = to_pyarrow_type(base_dtype) if pa_type is not None: inferred_dtype = ArrowDtype(pa_type) elif dtype_backend == "numpy_nullable" and isinstance(inferred_dtype, ArrowDtype): diff --git a/pandas/tests/series/methods/test_convert_dtypes.py b/pandas/tests/series/methods/test_convert_dtypes.py index d1c79d0f00365..f621604faae4b 100644 --- a/pandas/tests/series/methods/test_convert_dtypes.py +++ b/pandas/tests/series/methods/test_convert_dtypes.py @@ -265,3 +265,11 @@ def test_convert_dtypes_pyarrow_to_np_nullable(self): result = ser.convert_dtypes(dtype_backend="numpy_nullable") expected = pd.Series(range(2), dtype="Int32") tm.assert_series_equal(result, expected) + + def test_convert_dtypes_pyarrow_null(self): + # GH#55346 + pa = pytest.importorskip("pyarrow") + ser = pd.Series([None, None]) + result = ser.convert_dtypes(dtype_backend="pyarrow") + expected = pd.Series([None, None], dtype=pd.ArrowDtype(pa.null())) + tm.assert_series_equal(result, expected)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
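A minimal sketch of the new inference, assuming pyarrow is installed and mirroring the test above:

```python
import pandas as pd

ser = pd.Series([None, None])
result = ser.convert_dtypes(dtype_backend="pyarrow")
print(result.dtype)  # null[pyarrow] — the all-NA column was previously not converted
```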
https://api.github.com/repos/pandas-dev/pandas/pulls/55346
2023-10-01T19:30:05Z
2023-10-02T16:36:52Z
2023-10-02T16:36:52Z
2023-10-02T21:28:48Z
REF: check monotonicity inside _can_use_libjoin
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 0dc139781f58d..675288e20d1f8 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -43,6 +43,8 @@ from pandas._libs.missing cimport ( is_matching_na, ) +from decimal import InvalidOperation + # Defines shift of MultiIndex codes to avoid negative codes (missing values) multiindex_nulls_shift = 2 @@ -248,6 +250,10 @@ cdef class IndexEngine: @property def is_unique(self) -> bool: + # for why we check is_monotonic_increasing here, see + # https://github.com/pandas-dev/pandas/pull/55342#discussion_r1361405781 + if self.need_monotonic_check: + self.is_monotonic_increasing if self.need_unique_check: self._do_unique_check() @@ -281,7 +287,7 @@ cdef class IndexEngine: values = self.values self.monotonic_inc, self.monotonic_dec, is_strict_monotonic = \ self._call_monotonic(values) - except TypeError: + except (TypeError, InvalidOperation): self.monotonic_inc = 0 self.monotonic_dec = 0 is_strict_monotonic = 0 @@ -843,6 +849,10 @@ cdef class SharedEngine: @property def is_unique(self) -> bool: + # for why we check is_monotonic_increasing here, see + # https://github.com/pandas-dev/pandas/pull/55342#discussion_r1361405781 + if self.need_monotonic_check: + self.is_monotonic_increasing if self.need_unique_check: arr = self.values.unique() self.unique = len(arr) == len(self.values) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3e2e589440bd9..11d17066ebebd 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3711,7 +3711,7 @@ def memory_usage(self, index: bool = True, deep: bool = False) -> Series: many repeated values. >>> df['object'].astype('category').memory_usage(deep=True) - 5244 + 5136 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 88a08dd55f739..998c29fb3227c 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3382,9 +3382,7 @@ def _union(self, other: Index, sort: bool | None): if ( sort in (None, True) - and self.is_monotonic_increasing - and other.is_monotonic_increasing - and not (self.has_duplicates and other.has_duplicates) + and (self.is_unique or other.is_unique) and self._can_use_libjoin and other._can_use_libjoin ): @@ -3536,12 +3534,7 @@ def _intersection(self, other: Index, sort: bool = False): """ intersection specialized to the case with matching dtypes. """ - if ( - self.is_monotonic_increasing - and other.is_monotonic_increasing - and self._can_use_libjoin - and other._can_use_libjoin - ): + if self._can_use_libjoin and other._can_use_libjoin: try: res_indexer, indexer, _ = self._inner_indexer(other) except TypeError: @@ -4980,7 +4973,10 @@ def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]: def _join_monotonic( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: - # We only get here with matching dtypes and both monotonic increasing + # We only get here with (caller is responsible for ensuring): + # 1) matching dtypes + # 2) both monotonic increasing + # 3) other.is_unique or self.is_unique assert other.dtype == self.dtype assert self._can_use_libjoin and other._can_use_libjoin @@ -5062,6 +5058,10 @@ def _can_use_libjoin(self) -> bool: making a copy. If we cannot, this negates the performance benefit of using libjoin. """ + if not self.is_monotonic_increasing: + # The libjoin functions all assume monotonicity. 
+ return False + if type(self) is Index: # excludes EAs, but include masks, we get here with monotonic # values only, meaning no NA diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index 6f33b18b19c51..1b322b1797144 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -75,11 +75,6 @@ def data_for_grouping(): class TestCategorical(base.ExtensionTests): - @pytest.mark.xfail(reason="Memory usage doesn't match") - def test_memory_usage(self, data): - # TODO: Is this deliberate? - super().test_memory_usage(data) - def test_contains(self, data, data_missing): # GH-37867 # na value handling in Categorical.__contains__ is deprecated.
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
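For context, a minimal sketch (plain pandas API, no internals) of the kind of union this change routes through the sorted libjoin path; the fast-path condition in the comments is taken from the diff above:

```python
import pandas as pd

left = pd.Index([1, 2, 3, 3])   # monotonic increasing, has duplicates
right = pd.Index([2, 3, 4])     # monotonic increasing and unique

# With this change, _union takes the libjoin fast path whenever at least
# one side is unique and both sides pass _can_use_libjoin, which now also
# verifies monotonicity internally.
print(left.union(right))  # Index([1, 2, 3, 3, 4], dtype='int64')
```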
https://api.github.com/repos/pandas-dev/pandas/pulls/55342
2023-10-01T02:12:53Z
2023-12-27T19:09:17Z
2023-12-27T19:09:17Z
2023-12-28T02:00:02Z
ENH: Implement convert_dtypes on block level
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 716d1a78f93c5..3cac25600bd0e 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1133,7 +1133,7 @@ def convert_dtypes( base_dtype = inferred_dtype if ( base_dtype.kind == "O" # type: ignore[union-attr] - and len(input_array) > 0 + and input_array.size > 0 and isna(input_array).all() ): import pyarrow as pa diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7918e43b48719..a37cad50ce6a6 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6940,36 +6940,16 @@ def convert_dtypes( dtype: string """ check_dtype_backend(dtype_backend) - if self.ndim == 1: - return self._convert_dtypes( - infer_objects, - convert_string, - convert_integer, - convert_boolean, - convert_floating, - dtype_backend=dtype_backend, - ) - else: - results = [ - col._convert_dtypes( - infer_objects, - convert_string, - convert_integer, - convert_boolean, - convert_floating, - dtype_backend=dtype_backend, - ) - for col_name, col in self.items() - ] - if len(results) > 0: - result = concat(results, axis=1, copy=False, keys=self.columns) - cons = cast(type["DataFrame"], self._constructor) - result = cons(result) - result = result.__finalize__(self, method="convert_dtypes") - # https://github.com/python/mypy/issues/8354 - return cast(Self, result) - else: - return self.copy(deep=None) + new_mgr = self._mgr.convert_dtypes( # type: ignore[union-attr] + infer_objects=infer_objects, + convert_string=convert_string, + convert_integer=convert_integer, + convert_boolean=convert_boolean, + convert_floating=convert_floating, + dtype_backend=dtype_backend, + ) + res = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) + return res.__finalize__(self, method="convert_dtypes") # ---------------------------------------------------------------------- # Filling NA's diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index a3c2ede55dabf..b44afa83d9ee2 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -33,6 +33,7 @@ from pandas._typing import ( ArrayLike, AxisInt, + DtypeBackend, DtypeObj, F, FillnaOptions, @@ -55,6 +56,7 @@ from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, + convert_dtypes, find_result_type, maybe_downcast_to_dtype, np_can_hold_element, @@ -636,6 +638,52 @@ def convert( res_values = maybe_coerce_values(res_values) return [self.make_block(res_values, refs=refs)] + def convert_dtypes( + self, + copy: bool, + using_cow: bool, + infer_objects: bool = True, + convert_string: bool = True, + convert_integer: bool = True, + convert_boolean: bool = True, + convert_floating: bool = True, + dtype_backend: DtypeBackend = "numpy_nullable", + ) -> list[Block]: + if infer_objects and self.is_object: + blks = self.convert(copy=False, using_cow=using_cow) + else: + blks = [self] + + if not any( + [convert_floating, convert_integer, convert_boolean, convert_string] + ): + return [b.copy(deep=copy) for b in blks] + + rbs = [] + for blk in blks: + # Determine dtype column by column + sub_blks = [blk] if blk.ndim == 1 or self.shape[0] == 1 else blk._split() + dtypes = [ + convert_dtypes( + b.values, + convert_string, + convert_integer, + convert_boolean, + convert_floating, + infer_objects, + dtype_backend, + ) + for b in sub_blks + ] + if all(dtype == self.dtype for dtype in dtypes): + # Avoid block splitting if no dtype changes + rbs.append(blk.copy(deep=copy)) + continue + + for dtype, b in zip(dtypes, sub_blks): + 
rbs.append(b.astype(dtype=dtype, copy=copy, squeeze=b.ndim != 1)) + return rbs + # --------------------------------------------------------------------- # Array-Like Methods @@ -651,6 +699,7 @@ def astype( copy: bool = False, errors: IgnoreRaise = "raise", using_cow: bool = False, + squeeze: bool = False, ) -> Block: """ Coerce to the new dtype. @@ -665,12 +714,18 @@ def astype( - ``ignore`` : suppress exceptions. On error return original object using_cow: bool, default False Signaling if copy on write copy logic is used. + squeeze : bool, default False + squeeze values to ndim=1 if only one column is given Returns ------- Block """ values = self.values + if squeeze and values.ndim == 2: + if values.shape[0] != 1: + raise ValueError("Can not squeeze with more than one column.") + values = values[0, :] # type: ignore[call-overload] new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 13c039cef3f91..ac930efbcedf1 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -464,6 +464,16 @@ def convert(self, copy: bool | None) -> Self: return self.apply("convert", copy=copy, using_cow=using_copy_on_write()) + def convert_dtypes(self, **kwargs): + if using_copy_on_write(): + copy = False + else: + copy = True + + return self.apply( + "convert_dtypes", copy=copy, using_cow=using_copy_on_write(), **kwargs + ) + def get_values_for_csv( self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None ) -> Self: diff --git a/pandas/core/series.py b/pandas/core/series.py index a021ea7961cc0..1bbd10429ea22 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -61,7 +61,6 @@ from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import ( LossySetitemError, - convert_dtypes, maybe_box_native, maybe_cast_pointwise_result, ) @@ -167,7 +166,6 @@ CorrelationMethod, DropKeep, Dtype, - DtypeBackend, DtypeObj, FilePath, Frequency, @@ -5556,39 +5554,6 @@ def between( return lmask & rmask - # ---------------------------------------------------------------------- - # Convert to types that support pd.NA - - def _convert_dtypes( - self, - infer_objects: bool = True, - convert_string: bool = True, - convert_integer: bool = True, - convert_boolean: bool = True, - convert_floating: bool = True, - dtype_backend: DtypeBackend = "numpy_nullable", - ) -> Series: - input_series = self - if infer_objects: - input_series = input_series.infer_objects() - if is_object_dtype(input_series.dtype): - input_series = input_series.copy(deep=None) - - if convert_string or convert_integer or convert_boolean or convert_floating: - inferred_dtype = convert_dtypes( - input_series._values, - convert_string, - convert_integer, - convert_boolean, - convert_floating, - infer_objects, - dtype_backend, - ) - result = input_series.astype(inferred_dtype) - else: - result = input_series.copy(deep=None) - return result - # error: Cannot determine type of 'isna' @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type] def isna(self) -> Series: diff --git a/pandas/tests/frame/methods/test_convert_dtypes.py b/pandas/tests/frame/methods/test_convert_dtypes.py index c2b1016e88402..4c371afcc4e00 100644 --- a/pandas/tests/frame/methods/test_convert_dtypes.py +++ b/pandas/tests/frame/methods/test_convert_dtypes.py @@ -175,3 +175,17 @@ def test_convert_dtypes_pyarrow_timestamp(self): expected = ser.astype("timestamp[ms][pyarrow]") result = 
expected.convert_dtypes(dtype_backend="pyarrow") tm.assert_series_equal(result, expected) + + def test_convert_dtypes_avoid_block_splitting(self): + # GH#55341 + df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": "a"}) + result = df.convert_dtypes(convert_integer=False) + expected = pd.DataFrame( + { + "a": [1, 2, 3], + "b": [4, 5, 6], + "c": pd.Series(["a"] * 3, dtype="string[python]"), + } + ) + tm.assert_frame_equal(result, expected) + assert result._mgr.nblocks == 2
The current implementation always ends up splitting blocks, even when we don't want to touch a block at all. Pushing this down to the Block level lets us solve that problem.
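A small sketch of the behavior this enables, mirroring the test added above; the block-count check pokes at internals and is for illustration only:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": "a"})
# With convert_integer=False the integer columns are left untouched, so the
# block holding "a" and "b" no longer gets split column by column.
result = df.convert_dtypes(convert_integer=False)
print(result.dtypes)        # a/b stay int64, c becomes string
print(result._mgr.nblocks)  # 2 (internals; illustration only)
```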
https://api.github.com/repos/pandas-dev/pandas/pulls/55341
2023-09-30T22:38:29Z
2023-11-17T17:44:29Z
2023-11-17T17:44:29Z
2023-11-17T18:12:41Z
Implement masked algorithm for mode
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 48aee18c90456..066f0b6d247a1 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -318,6 +318,7 @@ Performance improvements - Performance improvement in :meth:`MultiIndex.get_indexer` when ``method`` is not ``None`` (:issue:`55839`) - Performance improvement in :meth:`Series.duplicated` for pyarrow dtypes (:issue:`55255`) - Performance improvement in :meth:`Series.str` methods (:issue:`55736`) +- Performance improvement in :meth:`Series.value_counts` and :meth:`Series.mode` for masked dtypes (:issue:`54984`, :issue:`55340`) - Performance improvement in :meth:`SeriesGroupBy.idxmax`, :meth:`SeriesGroupBy.idxmin`, :meth:`DataFrameGroupBy.idxmax`, :meth:`DataFrameGroupBy.idxmin` (:issue:`54234`) - Performance improvement when indexing into a non-unique index (:issue:`55816`) - Performance improvement when indexing with more than 4 keys (:issue:`54550`) diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in index 19acd4acbdee7..336af306d410f 100644 --- a/pandas/_libs/hashtable_func_helper.pxi.in +++ b/pandas/_libs/hashtable_func_helper.pxi.in @@ -404,12 +404,13 @@ def mode(ndarray[htfunc_t] values, bint dropna, const uint8_t[:] mask=None): cdef: ndarray[htfunc_t] keys ndarray[htfunc_t] modes + ndarray[uint8_t] res_mask = None int64_t[::1] counts int64_t count, _, max_count = -1 - Py_ssize_t nkeys, k, j = 0 + Py_ssize_t nkeys, k, na_counter, j = 0 - keys, counts, _ = value_count(values, dropna, mask=mask) + keys, counts, na_counter = value_count(values, dropna, mask=mask) nkeys = len(keys) modes = np.empty(nkeys, dtype=values.dtype) @@ -440,7 +441,10 @@ def mode(ndarray[htfunc_t] values, bint dropna, const uint8_t[:] mask=None): modes[j] = keys[k] - return modes[:j + 1] + if na_counter > 0: + res_mask = np.zeros(j+1, dtype=np.bool_) + res_mask[j] = True + return modes[:j + 1], res_mask {{py: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index dd45969a13fd7..6aca5e72e0197 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1034,7 +1034,10 @@ def mode( values = _ensure_data(values) - npresult = htable.mode(values, dropna=dropna, mask=mask) + npresult, res_mask = htable.mode(values, dropna=dropna, mask=mask) + if res_mask is not None: + return npresult, res_mask # type: ignore[return-value] + try: npresult = np.sort(npresult) except TypeError as err: diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index c8447397c7bfe..58909643ed46a 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -69,6 +69,7 @@ from pandas.core.algorithms import ( factorize_array, isin, + mode, take, ) from pandas.core.array_algos import ( @@ -1069,6 +1070,15 @@ def value_counts(self, dropna: bool = True) -> Series: ) return Series(arr, index=index, name="count", copy=False) + def _mode(self, dropna: bool = True) -> Self: + if dropna: + result = mode(self._data, dropna=dropna, mask=self._mask) + res_mask = np.zeros(result.shape, dtype=np.bool_) + else: + result, res_mask = mode(self._data, dropna=dropna, mask=self._mask) + result = type(self)(result, res_mask) # type: ignore[arg-type] + return result[result.argsort()] + @doc(ExtensionArray.equals) def equals(self, other) -> bool: if type(self) != type(other): diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py index 2c8f4c4149528..e54764f9ac4a6 100644 --- a/pandas/tests/libs/test_hashtable.py 
+++ b/pandas/tests/libs/test_hashtable.py @@ -644,13 +644,13 @@ def test_mode(self, dtype, writable): values = np.repeat(np.arange(N).astype(dtype), 5) values[0] = 42 values.flags.writeable = writable - result = ht.mode(values, False) + result = ht.mode(values, False)[0] assert result == 42 def test_mode_stable(self, dtype, writable): values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype) values.flags.writeable = writable - keys = ht.mode(values, False) + keys = ht.mode(values, False)[0] tm.assert_numpy_array_equal(keys, values) @@ -658,7 +658,7 @@ def test_modes_with_nans(): # GH42688, nans aren't mangled nulls = [pd.NA, np.nan, pd.NaT, None] values = np.array([True] + nulls * 2, dtype=np.object_) - modes = ht.mode(values, False) + modes = ht.mode(values, False)[0] assert modes.size == len(nulls) @@ -724,8 +724,8 @@ def test_ismember_no(self, dtype): def test_mode(self, dtype): values = np.array([42, np.nan, np.nan, np.nan], dtype=dtype) - assert ht.mode(values, True) == 42 - assert np.isnan(ht.mode(values, False)) + assert ht.mode(values, True)[0] == 42 + assert np.isnan(ht.mode(values, False)[0]) def test_ismember_tuple_with_nans(): diff --git a/pandas/tests/series/test_reductions.py b/pandas/tests/series/test_reductions.py index fbdf843a998bb..f79e58427688b 100644 --- a/pandas/tests/series/test_reductions.py +++ b/pandas/tests/series/test_reductions.py @@ -29,6 +29,28 @@ def test_mode_extension_dtype(as_period): tm.assert_series_equal(res, ser) +def test_mode_nullable_dtype(any_numeric_ea_dtype): + # GH#55340 + ser = Series([1, 3, 2, pd.NA, 3, 2, pd.NA], dtype=any_numeric_ea_dtype) + result = ser.mode(dropna=False) + expected = Series([2, 3, pd.NA], dtype=any_numeric_ea_dtype) + tm.assert_series_equal(result, expected) + + result = ser.mode(dropna=True) + expected = Series([2, 3], dtype=any_numeric_ea_dtype) + tm.assert_series_equal(result, expected) + + ser[-1] = pd.NA + + result = ser.mode(dropna=True) + expected = Series([2, 3], dtype=any_numeric_ea_dtype) + tm.assert_series_equal(result, expected) + + result = ser.mode(dropna=False) + expected = Series([pd.NA], dtype=any_numeric_ea_dtype) + tm.assert_series_equal(result, expected) + + def test_reductions_td64_with_nat(): # GH#8617 ser = Series([0, pd.NaT], dtype="m8[ns]")
- [ ] closes #45776 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
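A usage sketch of what the masked algorithm now supports, taken from the new test above:

```python
import pandas as pd

ser = pd.Series([1, 3, 2, pd.NA, 3, 2, pd.NA], dtype="Int64")
# NA is counted as its own value when dropna=False, via the new na_counter
print(ser.mode(dropna=False))  # 2, 3, <NA>
print(ser.mode(dropna=True))   # 2, 3
```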
https://api.github.com/repos/pandas-dev/pandas/pulls/55340
2023-09-30T20:07:39Z
2023-11-18T23:45:04Z
2023-11-18T23:45:04Z
2023-11-18T23:45:07Z
BUG: added default value of date_format in to_csv function (#55321)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 738f4cbe6bc43..f87823c2aa463 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3767,7 +3767,7 @@ def to_csv( quotechar: str = '"', lineterminator: str | None = None, chunksize: int | None = None, - date_format: str | None = None, + date_format: str | None = '%Y-%m-%d %H:%M:%S.%f%z', doublequote: bool_t = True, escapechar: str | None = None, decimal: str = ".",
- [x] closes #55321 (Replace xxxx with the GitHub issue number) - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/55339
2023-09-30T19:56:49Z
2023-09-30T21:20:12Z
null
2023-09-30T21:20:12Z
Raise a ValueError for unhandled dtypes in pyarrow conversion.
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 7743f762d8898..878a2d6f20cdc 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -251,6 +251,7 @@ Other Deprecations - Deprecated downcasting behavior in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, :meth:`DataFrame.mask`, :meth:`Series.clip`, :meth:`DataFrame.clip`; in a future version these will not infer object-dtype columns to non-object dtype, or all-round floats to integer dtype. Call ``result.infer_objects(copy=False)`` on the result for object inference, or explicitly cast floats to ints. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`53656`) - Deprecated including the groups in computations when using :meth:`DataFrameGroupBy.apply` and :meth:`DataFrameGroupBy.resample`; pass ``include_groups=False`` to exclude the groups (:issue:`7155`) - Deprecated not passing a tuple to :class:`DataFrameGroupBy.get_group` or :class:`SeriesGroupBy.get_group` when grouping by a length-1 list-like (:issue:`25971`) +- Deprecated string ``A`` denoting frequency in :class:`YearEnd` and strings ``A-DEC``, ``A-JAN``, etc. denoting annual frequencies with various fiscal year ends (:issue:`52536`) - Deprecated strings ``S``, ``U``, and ``N`` denoting units in :func:`to_timedelta` (:issue:`52536`) - Deprecated strings ``T``, ``S``, ``L``, ``U``, and ``N`` denoting frequencies in :class:`Minute`, :class:`Second`, :class:`Milli`, :class:`Micro`, :class:`Nano` (:issue:`52536`) - Deprecated strings ``T``, ``S``, ``L``, ``U``, and ``N`` denoting units in :class:`Timedelta` (:issue:`52536`) @@ -283,6 +284,7 @@ Bug fixes - Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`) - Bug in :meth:`pandas.DataFrame.melt` where it would not preserve the datetime (:issue:`55254`) - Bug in :meth:`pandas.read_excel` with a ODS file without cached formatted cell for float values (:issue:`55219`) +- Categorical ^^^^^^^^^^^ @@ -312,6 +314,7 @@ Numeric Conversion ^^^^^^^^^^ - Bug in :meth:`Series.convert_dtypes` not converting all NA column to ``null[pyarrow]`` (:issue:`55346`) +- Raise ValueError in :meth:`PandasColumn._dtype_from_pandasdtype` for currently unhandled dtypes. (:issue:`55332`) - Strings @@ -362,7 +365,9 @@ Plotting Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- +- Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`) +- Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`) +- Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.MonthBegin` (:issue:`55271`) - Reshaping @@ -389,6 +394,8 @@ Styler Other ^^^^^ - Bug in :func:`cut` incorrectly allowing cutting of timezone-aware datetimes with timezone-naive bins (:issue:`54964`) +- Bug in rendering a :class:`Series` with a :class:`MultiIndex` when one of the index level's names is 0 not having that name displayed (:issue:`55415`) +- .. 
***DO NOT USE THIS SECTION*** diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py index acfbc5d9e6c62..6686dd8614771 100644 --- a/pandas/core/interchange/column.py +++ b/pandas/core/interchange/column.py @@ -144,7 +144,12 @@ def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]: elif isinstance(dtype, DatetimeTZDtype): byteorder = dtype.base.byteorder # type: ignore[union-attr] else: - byteorder = dtype.byteorder + try: + byteorder = dtype.byteorder + except AttributeError: + raise ValueError( + f"Data type {dtype} not supported by interchange protocol" + ) return kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), byteorder diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py index 8a25a2c1889f3..ccb710429ead8 100644 --- a/pandas/tests/interchange/test_impl.py +++ b/pandas/tests/interchange/test_impl.py @@ -326,3 +326,17 @@ def test_interchange_from_non_pandas_tz_aware(): dtype="datetime64[us, Asia/Kathmandu]", ) tm.assert_frame_equal(expected, result) + + +def test_not_handled() -> None: + pa = pytest.importorskip("pyarrow", "11.0.0") + df = pd.DataFrame( + { + "b": pd.Series([True, False], dtype="boolean"), + } + ) + with pytest.raises( + ValueError, + match="Data type boolean not supported by interchange protocol", + ): + pa.interchange.from_dataframe(df)
Raises a ValueError in PandasColumn._dtype_from_pandasdtype for the following unhandled dtypes: 'b', 'B', 'S', 'a', 'V'. - [X] closes #55332 (Replace xxxx with the GitHub issue number) - [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
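A minimal reproduction of the new behavior, following the test added in the diff (requires pyarrow >= 11):

```python
import pandas as pd
import pyarrow.interchange

df = pd.DataFrame({"b": pd.Series([True, False], dtype="boolean")})
try:
    # Previously this surfaced an opaque AttributeError from dtype.byteorder
    pyarrow.interchange.from_dataframe(df)
except ValueError as err:
    print(err)  # Data type boolean not supported by interchange protocol
```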
https://api.github.com/repos/pandas-dev/pandas/pulls/55338
2023-09-30T19:46:22Z
2023-10-07T14:07:24Z
null
2023-10-07T14:07:24Z
DOC: Adjust user guide for CoW docs
diff --git a/doc/source/user_guide/copy_on_write.rst b/doc/source/user_guide/copy_on_write.rst index 59bdb1926895f..d0c57b56585db 100644 --- a/doc/source/user_guide/copy_on_write.rst +++ b/doc/source/user_guide/copy_on_write.rst @@ -7,8 +7,8 @@ Copy-on-Write (CoW) ******************* Copy-on-Write was first introduced in version 1.5.0. Starting from version 2.0 most of the -optimizations that become possible through CoW are implemented and supported. A complete list -can be found at :ref:`Copy-on-Write optimizations <copy_on_write.optimizations>`. +optimizations that become possible through CoW are implemented and supported. All possible +optimizations are supported starting from pandas 2.1. We expect that CoW will be enabled by default in version 3.0. @@ -154,6 +154,77 @@ With copy on write this can be done by using ``loc``. df.loc[df["bar"] > 5, "foo"] = 100 +Read-only NumPy arrays +---------------------- + +Accessing the underlying NumPy array of a DataFrame will return a read-only array if the array +shares data with the initial DataFrame: + +The array is a copy if the initial DataFrame consists of more than one array: + + +.. ipython:: python + + df = pd.DataFrame({"a": [1, 2], "b": [1.5, 2.5]}) + df.to_numpy() + +The array shares data with the DataFrame if the DataFrame consists of only one NumPy array: + +.. ipython:: python + + df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}) + df.to_numpy() + +This array is read-only, which means that it can't be modified inplace: + +.. ipython:: python + :okexcept: + + arr = df.to_numpy() + arr[0, 0] = 100 + +The same holds true for a Series, since a Series always consists of a single array. + +There are two potential solution to this: + +- Trigger a copy manually if you want to avoid updating DataFrames that share memory with your array. +- Make the array writeable. This is a more performant solution but circumvents Copy-on-Write rules, so + it should be used with caution. + +.. ipython:: python + + arr = df.to_numpy() + arr.flags.writeable = True + arr[0, 0] = 100 + arr + +Patterns to avoid +----------------- + +No defensive copy will be performed if two objects share the same data while +you are modifying one object inplace. + +.. ipython:: python + + df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df2 = df.reset_index() + df2.iloc[0, 0] = 100 + +This creates two objects that share data and thus the setitem operation will trigger a +copy. This is not necessary if the initial object ``df`` isn't needed anymore. +Simply reassigning to the same variable will invalidate the reference that is +held by the object. + +.. ipython:: python + + df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df = df.reset_index() + df.iloc[0, 0] = 100 + +No copy is necessary in this example. +Creating multiple references keeps unnecessary references alive +and thus will hurt performance with Copy-on-Write. + .. _copy_on_write.optimizations: Copy-on-Write optimizations @@ -161,59 +232,8 @@ Copy-on-Write optimizations A new lazy copy mechanism that defers the copy until the object in question is modified and only if this object shares data with another object. 
This mechanism was added to -following methods: - - - :meth:`DataFrame.reset_index` / :meth:`Series.reset_index` - - :meth:`DataFrame.set_index` - - :meth:`DataFrame.set_axis` / :meth:`Series.set_axis` - - :meth:`DataFrame.set_flags` / :meth:`Series.set_flags` - - :meth:`DataFrame.rename_axis` / :meth:`Series.rename_axis` - - :meth:`DataFrame.reindex` / :meth:`Series.reindex` - - :meth:`DataFrame.reindex_like` / :meth:`Series.reindex_like` - - :meth:`DataFrame.assign` - - :meth:`DataFrame.drop` - - :meth:`DataFrame.dropna` / :meth:`Series.dropna` - - :meth:`DataFrame.select_dtypes` - - :meth:`DataFrame.align` / :meth:`Series.align` - - :meth:`Series.to_frame` - - :meth:`DataFrame.rename` / :meth:`Series.rename` - - :meth:`DataFrame.add_prefix` / :meth:`Series.add_prefix` - - :meth:`DataFrame.add_suffix` / :meth:`Series.add_suffix` - - :meth:`DataFrame.drop_duplicates` / :meth:`Series.drop_duplicates` - - :meth:`DataFrame.droplevel` / :meth:`Series.droplevel` - - :meth:`DataFrame.reorder_levels` / :meth:`Series.reorder_levels` - - :meth:`DataFrame.between_time` / :meth:`Series.between_time` - - :meth:`DataFrame.filter` / :meth:`Series.filter` - - :meth:`DataFrame.head` / :meth:`Series.head` - - :meth:`DataFrame.tail` / :meth:`Series.tail` - - :meth:`DataFrame.isetitem` - - :meth:`DataFrame.pipe` / :meth:`Series.pipe` - - :meth:`DataFrame.pop` / :meth:`Series.pop` - - :meth:`DataFrame.replace` / :meth:`Series.replace` - - :meth:`DataFrame.shift` / :meth:`Series.shift` - - :meth:`DataFrame.sort_index` / :meth:`Series.sort_index` - - :meth:`DataFrame.sort_values` / :meth:`Series.sort_values` - - :meth:`DataFrame.squeeze` / :meth:`Series.squeeze` - - :meth:`DataFrame.swapaxes` - - :meth:`DataFrame.swaplevel` / :meth:`Series.swaplevel` - - :meth:`DataFrame.take` / :meth:`Series.take` - - :meth:`DataFrame.to_timestamp` / :meth:`Series.to_timestamp` - - :meth:`DataFrame.to_period` / :meth:`Series.to_period` - - :meth:`DataFrame.truncate` - - :meth:`DataFrame.iterrows` - - :meth:`DataFrame.tz_convert` / :meth:`Series.tz_localize` - - :meth:`DataFrame.fillna` / :meth:`Series.fillna` - - :meth:`DataFrame.interpolate` / :meth:`Series.interpolate` - - :meth:`DataFrame.ffill` / :meth:`Series.ffill` - - :meth:`DataFrame.bfill` / :meth:`Series.bfill` - - :meth:`DataFrame.where` / :meth:`Series.where` - - :meth:`DataFrame.infer_objects` / :meth:`Series.infer_objects` - - :meth:`DataFrame.astype` / :meth:`Series.astype` - - :meth:`DataFrame.convert_dtypes` / :meth:`Series.convert_dtypes` - - :meth:`DataFrame.join` - - :meth:`DataFrame.eval` - - :func:`concat` - - :func:`merge` +methods that don't require a copy of the underlying data. Popular examples are :meth:`DataFrame.drop` for ``axis=1`` +and :meth:`DataFrame.rename`. These methods return views when Copy-on-Write is enabled, which provides a significant performance improvement compared to the regular execution.
This is mostly from my blog post; ideally I'd like to backport it.
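A condensed sketch of the read-only-array behavior the new section documents, assuming CoW is enabled via the option shown:

```python
import pandas as pd

pd.set_option("mode.copy_on_write", True)

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})  # single block -> shares memory
arr = df.to_numpy()
print(arr.flags.writeable)  # False: writing to arr could silently change df

arr.flags.writeable = True  # opts out of the protection; use with caution
arr[0, 0] = 100
```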
https://api.github.com/repos/pandas-dev/pandas/pulls/55337
2023-09-30T18:29:33Z
2023-10-15T13:43:59Z
2023-10-15T13:43:59Z
2023-10-15T13:44:02Z
BLD: Fix gitpod meson version
diff --git a/.gitpod.yml b/.gitpod.yml index 9222639136a17..94bc32e3f9af6 100644 --- a/.gitpod.yml +++ b/.gitpod.yml @@ -13,9 +13,11 @@ tasks: mkdir -p .vscode cp gitpod/settings.json .vscode/settings.json git fetch --tags + pip install --upgrade meson python -m pip install -ve . --no-build-isolation --config-settings editable-verbose=true pre-commit install command: | + pip install --upgrade meson python -m pip install -ve . --no-build-isolation --config-settings editable-verbose=true echo "✨ Pre-build complete! You can close this terminal ✨ "
- [X] closes #55214 - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). The issue is that meson is out of date in gitpod. `pip install --upgrade meson` The above command fixes the issue, so I added it to the build instructions in gitpod.yml. It could be added to the dockerfile instead, but I decided to be cautious. I added the update command to both the `init` and `command` sections because gitpod sometimes skips the `init` section and goes straight to the `command` section without installing anything. With the fix: ``` gitpod@theuerc-pandas-jy3jlfp1dg6:/workspace/pandas$ python Python 3.10.8 (main, Dec 6 2022, 14:13:21) [GCC 10.2.1 20210110] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import pandas + /usr/local/bin/ninja [1/1] Generating write_version_file with a custom command >>> pandas.to_datetime("2022-01-01") Timestamp('2022-01-01 00:00:00') >>> ``` I'm running the tests in gitpod overnight and I'll check the log file in the morning. #### Update: These are the results: 661 failed, 211445 passed, 7990 skipped, 2426 xfailed, 15 xpassed, 405 warnings, 58 errors in 4524.22s (1:15:24) The failed tests are the ones that interact with either S3 or a nonexistent SQL server, so I think everything works.
https://api.github.com/repos/pandas-dev/pandas/pulls/55331
2023-09-30T02:45:33Z
2024-01-08T21:43:50Z
null
2024-01-08T21:43:50Z
TYP: read_csv's usecols
diff --git a/pandas/_typing.py b/pandas/_typing.py index f18c67fcb0c90..de01434c09c39 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -509,3 +509,12 @@ def closed(self) -> bool: # Offsets OffsetCalendar = Union[np.busdaycalendar, "AbstractHolidayCalendar"] + +# read_csv: usecols +UsecolsArgType = Union[ + SequenceNotStr[Hashable], + range, + AnyArrayLike, + Callable[[HashableT], bool], + None, +] diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 6ce6ac71b1ddd..32f8a1fe81a9f 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -77,11 +77,11 @@ DtypeArg, DtypeBackend, FilePath, - HashableT, IndexLabel, ReadCsvBuffer, Self, StorageOptions, + UsecolsArgType, ) _doc_read_csv_and_table = ( r""" @@ -142,7 +142,7 @@ Note: ``index_col=False`` can be used to force pandas to *not* use the first column as the index, e.g., when you have a malformed file with delimiters at the end of each line. -usecols : list of Hashable or Callable, optional +usecols : Sequence of Hashable or Callable, optional Subset of columns to select, denoted either by column labels or column indices. If list-like, all elements must either be positional (i.e. integer indices into the document columns) or strings @@ -645,10 +645,7 @@ def read_csv( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] - | tuple[HashableT] - | Callable[[Hashable], bool] - | None = ..., + usecols: UsecolsArgType = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -707,10 +704,7 @@ def read_csv( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] - | tuple[HashableT] - | Callable[[Hashable], bool] - | None = ..., + usecols: UsecolsArgType = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -770,10 +764,7 @@ def read_csv( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] - | tuple[HashableT] - | Callable[[Hashable], bool] - | None = ..., + usecols: UsecolsArgType = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -833,10 +824,7 @@ def read_csv( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] - | tuple[HashableT] - | Callable[[Hashable], bool] - | None = ..., + usecols: UsecolsArgType = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -907,10 +895,7 @@ def read_csv( header: int | Sequence[int] | None | Literal["infer"] = "infer", names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default, index_col: IndexLabel | Literal[False] | None = None, - usecols: list[HashableT] - | tuple[HashableT] - | Callable[[Hashable], bool] - | None = None, + usecols: UsecolsArgType = None, # General Parsing Configuration dtype: DtypeArg | None = None, engine: CSVEngine | None = None, @@ -1005,10 +990,7 @@ def read_table( header: int | 
Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] - | tuple[HashableT] - | Callable[[Hashable], bool] - | None = ..., + usecols: UsecolsArgType = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -1065,10 +1047,7 @@ def read_table( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] - | tuple[HashableT] - | Callable[[Hashable], bool] - | None = ..., + usecols: UsecolsArgType = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -1125,10 +1104,7 @@ def read_table( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] - | tuple[HashableT] - | Callable[[Hashable], bool] - | None = ..., + usecols: UsecolsArgType = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -1185,10 +1161,7 @@ def read_table( header: int | Sequence[int] | None | Literal["infer"] = ..., names: Sequence[Hashable] | None | lib.NoDefault = ..., index_col: IndexLabel | Literal[False] | None = ..., - usecols: list[HashableT] - | tuple[HashableT] - | Callable[[Hashable], bool] - | None = ..., + usecols: UsecolsArgType = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters: Mapping[Hashable, Callable] | None = ..., @@ -1258,10 +1231,7 @@ def read_table( header: int | Sequence[int] | None | Literal["infer"] = "infer", names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default, index_col: IndexLabel | Literal[False] | None = None, - usecols: list[HashableT] - | tuple[HashableT] - | Callable[[Hashable], bool] - | None = None, + usecols: UsecolsArgType = None, # General Parsing Configuration dtype: DtypeArg | None = None, engine: CSVEngine | None = None,
This makes read_csv's usecols compatible with the tests from pandas-stubs. The documentation mentions only list, Callable, and None, which notably excludes range, tuple, and ndarray. I'm happy to tighten the types to the documented ones or to adjust the documentation instead. edit: the pandas tests cover list, tuple, set, np.ndarray, and probably more, so I adjusted the documentation.
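For illustration, a couple of calls that the widened `UsecolsArgType` now accepts in type checking (the runtime behavior is unchanged):

```python
from io import StringIO

import pandas as pd

data = "a,b,c\n1,2,3\n4,5,6"
# range of positional column indices
df1 = pd.read_csv(StringIO(data), usecols=range(2))
# callable filtering on column labels
df2 = pd.read_csv(StringIO(data), usecols=lambda col: col != "b")
```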
https://api.github.com/repos/pandas-dev/pandas/pulls/55330
2023-09-30T02:18:11Z
2023-10-04T14:47:00Z
2023-10-04T14:47:00Z
2023-12-10T04:33:42Z
DOC: add parameters and examples to CustomBusinessMonthBegin/End
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 74398eb0e2405..8fdba8992f627 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -4329,28 +4329,6 @@ cdef class CustomBusinessHour(BusinessHour): cdef class _CustomBusinessMonth(BusinessMixin): - """ - DateOffset subclass representing custom business month(s). - - Increments between beginning/end of month dates. - - Parameters - ---------- - n : int, default 1 - The number of months represented. - normalize : bool, default False - Normalize start/end dates to midnight before generating date range. - weekmask : str, Default 'Mon Tue Wed Thu Fri' - Weekmask of valid business days, passed to ``numpy.busdaycalendar``. - holidays : list - List/array of dates to exclude from the set of valid business days, - passed to ``numpy.busdaycalendar``. - calendar : np.busdaycalendar - Calendar to integrate. - offset : timedelta, default timedelta(0) - Time offset to apply. - """ - _attributes = tuple( ["n", "normalize", "weekmask", "holidays", "calendar", "offset"] ) @@ -4426,10 +4404,124 @@ cdef class _CustomBusinessMonth(BusinessMixin): cdef class CustomBusinessMonthEnd(_CustomBusinessMonth): + """ + DateOffset subclass representing custom business month(s). + + Increments between end of month dates. + + Parameters + ---------- + n : int, default 1 + The number of months represented. + normalize : bool, default False + Normalize end dates to midnight before generating date range. + weekmask : str, Default 'Mon Tue Wed Thu Fri' + Weekmask of valid business days, passed to ``numpy.busdaycalendar``. + holidays : list + List/array of dates to exclude from the set of valid business days, + passed to ``numpy.busdaycalendar``. + calendar : np.busdaycalendar + Calendar to integrate. + offset : timedelta, default timedelta(0) + Time offset to apply. + + See Also + -------- + :class:`~pandas.tseries.offsets.DateOffset` : Standard kind of date increment. + + Examples + -------- + In the example below we use the default parameters. + + >>> ts = pd.Timestamp(2022, 8, 5) + >>> ts + pd.offsets.CustomBusinessMonthEnd() + Timestamp('2022-08-31 00:00:00') + + Custom business month end can be specified by ``weekmask`` parameter. + To convert the returned datetime object to its string representation + the function strftime() is used in the next example. + + >>> import datetime as dt + >>> freq = pd.offsets.CustomBusinessMonthEnd(weekmask="Wed Thu") + >>> pd.date_range(dt.datetime(2022, 7, 10), dt.datetime(2022, 12, 18), + ... freq=freq).strftime('%a %d %b %Y %H:%M') + Index(['Thu 28 Jul 2022 00:00', 'Wed 31 Aug 2022 00:00', + 'Thu 29 Sep 2022 00:00', 'Thu 27 Oct 2022 00:00', + 'Wed 30 Nov 2022 00:00'], + dtype='object') + + Using NumPy business day calendar you can define custom holidays. + + >>> import datetime as dt + >>> bdc = np.busdaycalendar(holidays=['2022-08-01', '2022-09-30', + ... '2022-10-31', '2022-11-01']) + >>> freq = pd.offsets.CustomBusinessMonthEnd(calendar=bdc) + >>> pd.date_range(dt.datetime(2022, 7, 10), dt.datetime(2022, 11, 10), freq=freq) + DatetimeIndex(['2022-07-29', '2022-08-31', '2022-09-29', '2022-10-28'], + dtype='datetime64[ns]', freq='CBM') + """ + _prefix = "CBM" cdef class CustomBusinessMonthBegin(_CustomBusinessMonth): + """ + DateOffset subclass representing custom business month(s). + + Increments between beginning of month dates. + + Parameters + ---------- + n : int, default 1 + The number of months represented. 
+ normalize : bool, default False + Normalize start dates to midnight before generating date range. + weekmask : str, Default 'Mon Tue Wed Thu Fri' + Weekmask of valid business days, passed to ``numpy.busdaycalendar``. + holidays : list + List/array of dates to exclude from the set of valid business days, + passed to ``numpy.busdaycalendar``. + calendar : np.busdaycalendar + Calendar to integrate. + offset : timedelta, default timedelta(0) + Time offset to apply. + + See Also + -------- + :class:`~pandas.tseries.offsets.DateOffset` : Standard kind of date increment. + + Examples + -------- + In the example below we use the default parameters. + + >>> ts = pd.Timestamp(2022, 8, 5) + >>> ts + pd.offsets.CustomBusinessMonthBegin() + Timestamp('2022-09-01 00:00:00') + + Custom business month start can be specified by ``weekmask`` parameter. + To convert the returned datetime object to its string representation + the function strftime() is used in the next example. + + >>> import datetime as dt + >>> freq = pd.offsets.CustomBusinessMonthBegin(weekmask="Wed Thu") + >>> pd.date_range(dt.datetime(2022, 7, 10), dt.datetime(2022, 12, 18), + ... freq=freq).strftime('%a %d %b %Y %H:%M') + Index(['Wed 03 Aug 2022 00:00', 'Thu 01 Sep 2022 00:00', + 'Wed 05 Oct 2022 00:00', 'Wed 02 Nov 2022 00:00', + 'Thu 01 Dec 2022 00:00'], + dtype='object') + + Using NumPy business day calendar you can define custom holidays. + + >>> import datetime as dt + >>> bdc = np.busdaycalendar(holidays=['2022-08-01', '2022-09-30', + ... '2022-10-31', '2022-11-01']) + >>> freq = pd.offsets.CustomBusinessMonthBegin(calendar=bdc) + >>> pd.date_range(dt.datetime(2022, 7, 10), dt.datetime(2022, 11, 10), freq=freq) + DatetimeIndex(['2022-08-02', '2022-09-01', '2022-10-03', '2022-11-02'], + dtype='datetime64[ns]', freq='CBMS') + """ + _prefix = "CBMS"
In the documentation of the offsets classes [CustomBusinessMonthEnd](https://pandas.pydata.org/docs/dev/reference/api/pandas.tseries.offsets.CustomBusinessMonthEnd.html) and [CustomBusinessMonthBegin](https://pandas.pydata.org/docs/dev/reference/api/pandas.tseries.offsets.CustomBusinessMonthBegin.html), the Parameters, See Also, and Examples sections are missing. I added parameters and examples.
https://api.github.com/repos/pandas-dev/pandas/pulls/55328
2023-09-29T19:35:58Z
2023-10-02T09:48:18Z
2023-10-02T09:48:18Z
2023-10-02T09:48:19Z
COMPAT: Fix warning with numba >= 0.58.0
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 129f5cedb86c2..7bf9041412b74 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -900,6 +900,7 @@ Other - Bug in rendering a :class:`Series` with a :class:`MultiIndex` when one of the index level's names is 0 not having that name displayed (:issue:`55415`) - Bug in the error message when assigning an empty :class:`DataFrame` to a column (:issue:`55956`) - Bug when time-like strings were being cast to :class:`ArrowDtype` with ``pyarrow.time64`` type (:issue:`56463`) +- Fixed a spurious deprecation warning from ``numba`` >= 0.58.0 when passing a numpy ufunc in :class:`pandas.core.window.Rolling.apply` with ``engine="numba"`` (:issue:`55247`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py index b8d489179338b..4825c9fee24b1 100644 --- a/pandas/core/util/numba_.py +++ b/pandas/core/util/numba_.py @@ -1,11 +1,14 @@ """Common utilities for Numba operations""" from __future__ import annotations +import types from typing import ( TYPE_CHECKING, Callable, ) +import numpy as np + from pandas.compat._optional import import_optional_dependency from pandas.errors import NumbaUtilError @@ -83,6 +86,12 @@ def jit_user_function(func: Callable) -> Callable: if numba.extending.is_jitted(func): # Don't jit a user passed jitted function numba_func = func + elif getattr(np, func.__name__, False) is func or isinstance( + func, types.BuiltinFunctionType + ): + # Not necessary to jit builtins or np functions + # This will mess up register_jitable + numba_func = func else: numba_func = numba.extending.register_jitable(func) diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py index b1cc7ec186f19..139e1ff7f65fd 100644 --- a/pandas/tests/window/test_numba.py +++ b/pandas/tests/window/test_numba.py @@ -446,3 +446,10 @@ def test_table_method_ewm(self, data, method, axis, nogil, parallel, nopython): engine_kwargs=engine_kwargs, engine="numba" ) tm.assert_frame_equal(result, expected) + + +@td.skip_if_no("numba") +def test_npfunc_no_warnings(): + df = DataFrame({"col1": [1, 2, 3, 4, 5]}) + with tm.assert_produces_warning(False): + df.col1.rolling(2).apply(np.prod, raw=True, engine="numba")
- [ ] closes #55247 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
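A minimal reproduction of the warning this fixes, adapted from the new test (requires numba installed):

```python
import numpy as np
import pandas as pd

ser = pd.Series([1, 2, 3, 4, 5])
# With numba >= 0.58.0 this used to emit a deprecation warning because
# register_jitable was applied to the NumPy function; the fix skips
# jitting builtins and np functions.
print(ser.rolling(2).apply(np.prod, raw=True, engine="numba"))
```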
https://api.github.com/repos/pandas-dev/pandas/pulls/55327
2023-09-29T19:19:22Z
2024-01-11T01:32:55Z
2024-01-11T01:32:55Z
2024-01-17T15:55:56Z
Bug fix where df.rolling doesn't work with certain datetime64 index t…
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 0760840f9950a..d703c38d30063 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -248,6 +248,7 @@ Performance improvements Bug fixes ~~~~~~~~~ - Bug in :class:`AbstractHolidayCalendar` where timezone data was not propagated when computing holiday observances (:issue:`54580`) +- Bug in :class:`pandas.core.window.Rolling` where df.rolling does not work with 'datetime64[us]', 'datetime64[ms]', and 'datetime64[s]' index types (:issue:`55299`) - Bug in :class:`pandas.core.window.Rolling` where duplicate datetimelike indexes are treated as consecutive rather than equal with ``closed='left'`` and ``closed='neither'`` (:issue:`20712`) - Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`) - Bug in :meth:`pandas.read_excel` with a ODS file without cached formatted cell for float values (:issue:`55219`) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 72e94d049a9de..99d46bc09be0f 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -405,6 +405,8 @@ def _index_array(self): # TODO: why do we get here with e.g. MultiIndex? if needs_i8_conversion(self._on.dtype): idx = cast("PeriodIndex | DatetimeIndex | TimedeltaIndex", self._on) + if (type(idx) == DatetimeIndex) and (idx.T.unit != "ns"): + idx = idx.astype("datetime64[ns]") return idx.asi8 return None diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 3fe922539780d..03ff6bee49a37 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -1950,3 +1950,30 @@ def test_numeric_only_corr_cov_series(kernel, use_arg, numeric_only, dtype): op2 = getattr(rolling2, kernel) expected = op2(*arg2, numeric_only=numeric_only) tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "data_type", + [ + "datetime64[us]", + "datetime64[ms]", + "datetime64[s]", + ], +) +def test_rolling_sum_on_dtypes(data_type): + # GH 55299 + index = [ + "2019-01-03 05:11", + "2019-01-03 05:23", + "2019-01-03 05:28", + "2019-01-03 05:32", + "2019-01-03 05:36", + ] + arr = [True, False, False, True, True] + + df_exp = DataFrame({"arr": arr}, index=to_datetime(index).astype("datetime64[ns]")) + df_test = DataFrame({"arr": arr}, index=to_datetime(index).astype(data_type)) + sum_df_exp = df_exp.rolling("5min").sum() + sum_df_test = df_test.rolling("5min").sum() + + assert list(sum_df_test.values) == list(sum_df_exp.values)
- [x] closes #55299 - [x] Tests added and passed. - [x] All code checks passed. - [x] Added an entry in bug fixes in the latest `doc/source/whatsnew/v2.2.0.rst` file. This PR fixes an issue in `df.rolling`: `rolling._win_freq_i8` is always given in nanoseconds, but `rolling._index_array` is given in the unit of the index dtype (e.g. s, ms, us, ns). This breaks the window calculation whenever the dtype is not `datetime64[ns]`. This change returns the index array in nanoseconds so that it matches `rolling._win_freq_i8`.
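A short reproduction of the mismatch, following the new test; before the fix the rolling window over a non-nanosecond index gave incorrect results:

```python
import pandas as pd

idx = pd.to_datetime(["2019-01-03 05:11", "2019-01-03 05:23", "2019-01-03 05:28"])
df_ns = pd.DataFrame({"x": [1.0, 1.0, 1.0]}, index=idx)
df_s = pd.DataFrame({"x": [1.0, 1.0, 1.0]}, index=idx.astype("datetime64[s]"))

# Both should agree; before the fix the datetime64[s] index produced wrong
# window bounds because _win_freq_i8 is always expressed in nanoseconds.
print(df_ns.rolling("5min").sum())
print(df_s.rolling("5min").sum())
```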
https://api.github.com/repos/pandas-dev/pandas/pulls/55325
2023-09-29T17:45:21Z
2023-09-29T18:10:35Z
null
2023-09-29T18:10:35Z
BUG: fix parsing of ODF time values with comments
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 7743f762d8898..ce9255142d0c1 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -282,6 +282,7 @@ Bug fixes - Bug in :func:`pandas.api.types.is_string_dtype` while checking object array with no elements is of the string dtype (:issue:`54661`) - Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`) - Bug in :meth:`pandas.DataFrame.melt` where it would not preserve the datetime (:issue:`55254`) +- Bug in :meth:`pandas.read_excel` where ODS files with comments on time value cells failed to parse (:issue:`55200`) - Bug in :meth:`pandas.read_excel` with a ODS file without cached formatted cell for float values (:issue:`55219`) Categorical diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 277f64f636731..05f5ee5951c6c 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -1,5 +1,7 @@ from __future__ import annotations +import datetime +import re from typing import ( TYPE_CHECKING, cast, @@ -26,6 +28,12 @@ from pandas._libs.tslibs.nattype import NaTType +# ODF variant of ISO 8601 time/duration format: "PThhhHmmMss.sssS" +# see https://www.w3.org/TR/xmlschema-2/#duration for details +ODF_ISOTIME_PATTERN = re.compile( + r"^\s*PT\s*(\d+)\s*H\s*(\d+)\s*M\s*(\d+)(\.(\d+))?\s*S$" +) + @doc(storage_options=_shared_docs["storage_options"]) class ODFReader(BaseExcelReader["OpenDocument"]): @@ -214,9 +222,9 @@ def _get_cell_value(self, cell) -> Scalar | NaTType: cell_value = cell.attributes.get((OFFICENS, "date-value")) return pd.Timestamp(cell_value) elif cell_type == "time": - stamp = pd.Timestamp(str(cell)) + stamp = self._get_cell_time_value(cell) # cast needed here because Scalar doesn't include datetime.time - return cast(Scalar, stamp.time()) + return cast(Scalar, stamp) else: self.close() raise ValueError(f"Unrecognized type {cell_type}") @@ -247,3 +255,28 @@ def _get_cell_string_value(self, cell) -> str: else: value.append(str(fragment).strip("\n")) return "".join(value) + + def _get_cell_time_value(self, cell) -> datetime.time: + """ + This helper function parses ODF time value + """ + from odf.namespaces import OFFICENS + + value = cell.attributes.get((OFFICENS, "time-value")) + parts = ODF_ISOTIME_PATTERN.match(value) + if parts is None: + raise ValueError(f"Failed to parse ODF time value: {value}") + hours, minutes, seconds, _, second_part = parts.group(*range(1, 6)) + if second_part is None: + microseconds = 0 + else: + microseconds = int(int(second_part) * pow(10, 6 - len(second_part))) + + return datetime.time( + # ignore date part from some representations + # and datetime.time restrict hour values to 0..23 + hour=int(hours) % 24, + minute=int(minutes), + second=int(seconds), + microsecond=microseconds, + ) diff --git a/pandas/tests/io/data/excel/test_corrupted_time.ods b/pandas/tests/io/data/excel/test_corrupted_time.ods new file mode 100644 index 0000000000000..c3c3d105a4e0c Binary files /dev/null and b/pandas/tests/io/data/excel/test_corrupted_time.ods differ diff --git a/pandas/tests/io/data/excel/times_1900.ods b/pandas/tests/io/data/excel/times_1900.ods index 79e031c721ea3..7e307952bf89f 100644 Binary files a/pandas/tests/io/data/excel/times_1900.ods and b/pandas/tests/io/data/excel/times_1900.ods differ diff --git a/pandas/tests/io/data/excel/times_1904.ods b/pandas/tests/io/data/excel/times_1904.ods index b47a949d3b715..88ec4a17b2a3b 100644 
Binary files a/pandas/tests/io/data/excel/times_1904.ods and b/pandas/tests/io/data/excel/times_1904.ods differ diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 8dd9f96a05a90..b76f0d035d7af 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -1014,6 +1014,11 @@ def test_reader_seconds(self, request, engine, read_ext): actual = pd.read_excel("times_1904" + read_ext, sheet_name="Sheet1") tm.assert_frame_equal(actual, expected) + if read_ext == ".ods": + msg = "Failed to parse ODF time value: PT01H5a2M00S" + with pytest.raises(ValueError, match=msg): + pd.read_excel("test_corrupted_time" + read_ext) + def test_read_excel_multiindex(self, request, engine, read_ext): # see gh-4679 if engine == "pyxlsb":
- [x] Similar to #55200, a variant for `time-value` cells - [x] Tests: added comments to cells in the `test_1900.ods` and `test_1904.ods` fixtures, so that [`io/excel/test_readers.py:test_reader_seconds()`](https://github.com/pandas-dev/pandas/blob/main/pandas/tests/io/excel/test_readers.py#L976-L1015) would fail without this fix. Also fixed missing microseconds there (see p.1 in #55045). - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v2.2.0.rst` file. > ODF `office:time-value` format is specified by [p.3.2.6 of XML Schema (Part 2)](https://www.w3.org/TR/2004/REC-xmlschema-2-20041028/#duration) as referenced in [p.19.389 of OpenDocument Schema Part 3](https://docs.oasis-open.org/office/OpenDocument/v1.3/OpenDocument-v1.3-part3-schema.html#attribute-office_value-type) Related to: #55045 (test files updates)
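A standalone sketch of the parsing helper added in the diff, showing how a fractional-second ODF duration maps to `datetime.time`:

```python
import datetime
import re

# Same pattern as in the patch: ODF variant of ISO 8601 "PThhHmmMss.sssS"
ODF_ISOTIME_PATTERN = re.compile(
    r"^\s*PT\s*(\d+)\s*H\s*(\d+)\s*M\s*(\d+)(\.(\d+))?\s*S$"
)

hours, minutes, seconds, _, frac = ODF_ISOTIME_PATTERN.match(
    "PT13H37M00.5S"
).group(*range(1, 6))
micro = int(int(frac) * 10 ** (6 - len(frac))) if frac else 0
# Hours are taken modulo 24 because datetime.time restricts them to 0..23
print(datetime.time(int(hours) % 24, int(minutes), int(seconds), micro))
# 13:37:00.500000
```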
https://api.github.com/repos/pandas-dev/pandas/pulls/55324
2023-09-29T13:31:30Z
2023-11-27T18:49:29Z
null
2023-11-28T08:36:11Z
CLN: Clean up some iteration logic in tslib
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 20a18cf56779f..43252ffb5bf13 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -455,18 +455,18 @@ cpdef array_to_datetime( set out_tzoffset_vals = set() tzinfo tz_out = None bint found_tz = False, found_naive = False - cnp.broadcast mi + cnp.flatiter it = cnp.PyArray_IterNew(values) # specify error conditions assert is_raise or is_ignore or is_coerce result = np.empty((<object>values).shape, dtype="M8[ns]") - mi = cnp.PyArray_MultiIterNew2(result, values) iresult = result.view("i8").ravel() for i in range(n): # Analogous to `val = values[i]` - val = <object>(<PyObject**>cnp.PyArray_MultiIter_DATA(mi, 1))[0] + val = cnp.PyArray_GETITEM(values, cnp.PyArray_ITER_DATA(it)) + cnp.PyArray_ITER_NEXT(it) try: if checknull_with_nat_and_na(val): @@ -511,7 +511,6 @@ cpdef array_to_datetime( if parse_today_now(val, &iresult[i], utc): # We can't _quite_ dispatch this to convert_str_to_tsobject # bc there isn't a nice way to pass "utc" - cnp.PyArray_MultiIter_NEXT(mi) continue _ts = convert_str_to_tsobject( @@ -540,13 +539,10 @@ cpdef array_to_datetime( else: raise TypeError(f"{type(val)} is not convertible to datetime") - cnp.PyArray_MultiIter_NEXT(mi) - except (TypeError, OverflowError, ValueError) as ex: ex.args = (f"{ex}, at position {i}",) if is_coerce: iresult[i] = NPY_NAT - cnp.PyArray_MultiIter_NEXT(mi) continue elif is_raise: raise
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
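A rough NumPy-level analogue of the change (not the Cython code itself): iterate the input with a single flat iterator and index the result by position, instead of broadcasting input and output together with a MultiIter:

```python
import numpy as np

values = np.array(["2021-01-01", None, "2021-02-01"], dtype=object)
result = np.empty(values.shape, dtype="M8[ns]")

# cnp.PyArray_IterNew / PyArray_ITER_NEXT correspond roughly to values.flat
for i, val in enumerate(values.flat):
    result.flat[i] = np.datetime64(val if val is not None else "NaT", "ns")

print(result)  # ['2021-01-01' 'NaT' '2021-02-01']
```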
https://api.github.com/repos/pandas-dev/pandas/pulls/55320
2023-09-28T22:34:57Z
2023-09-29T14:50:51Z
2023-09-29T14:50:51Z
2023-09-29T18:17:59Z
BUG: `date_range` `inclusive` parameter behavior doesn't match interval notation
diff --git a/doc/source/whatsnew/v2.1.3.rst b/doc/source/whatsnew/v2.1.3.rst index af626895a9e0e..79333415a2a11 100644 --- a/doc/source/whatsnew/v2.1.3.rst +++ b/doc/source/whatsnew/v2.1.3.rst @@ -21,6 +21,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ - Bug in :meth:`DatetimeIndex.diff` raising ``TypeError`` (:issue:`55080`) +- Fixed bug in :meth:`DatetimeArray._generate_range` where ``inclusive`` argument behavior did not match interval notation (:issue:`55293`,:issue:`46331`) - Bug in :meth:`Index.isin` raising for Arrow backed string and ``None`` value (:issue:`55821`) - Fix :func:`read_parquet` and :func:`read_feather` for `CVE-2023-47248 <https://www.cve.org/CVERecord?id=CVE-2023-47248>`__ (:issue:`55894`) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 0074645a482b2..4d9d48763ce80 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -508,17 +508,11 @@ def _generate_range( # type: ignore[override] # to overflow and cast to e.g. f8, but if it does we need to cast i8values = i8values.astype("i8") - if start == end: - if not left_inclusive and not right_inclusive: - i8values = i8values[1:-1] - else: - start_i8 = Timestamp(start)._value - end_i8 = Timestamp(end)._value - if not left_inclusive or not right_inclusive: - if not left_inclusive and len(i8values) and i8values[0] == start_i8: - i8values = i8values[1:] - if not right_inclusive and len(i8values) and i8values[-1] == end_i8: - i8values = i8values[:-1] + if not left_inclusive: + i8values = i8values[1:] + + if not right_inclusive: + i8values = i8values[:-1] dt64_values = i8values.view(f"datetime64[{unit}]") dtype = tz_to_dtype(tz, unit=unit) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 44dd64e162413..f8fd224963b56 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -49,29 +49,22 @@ def _get_expected_range( - begin_to_match, - end_to_match, both_range, inclusive_endpoints, ): """Helper to get expected range from a both inclusive range""" - left_match = begin_to_match == both_range[0] - right_match = end_to_match == both_range[-1] - - if inclusive_endpoints == "left" and right_match: - expected_range = both_range[:-1] - elif inclusive_endpoints == "right" and left_match: - expected_range = both_range[1:] - elif inclusive_endpoints == "neither" and left_match and right_match: - expected_range = both_range[1:-1] - elif inclusive_endpoints == "neither" and right_match: + if inclusive_endpoints == "left": expected_range = both_range[:-1] - elif inclusive_endpoints == "neither" and left_match: + elif inclusive_endpoints == "right": expected_range = both_range[1:] elif inclusive_endpoints == "both": expected_range = both_range[:] + elif inclusive_endpoints == "neither": + expected_range = both_range[1:-1] else: - expected_range = both_range[:] + raise ValueError( + "Inclusive has to be either 'both', 'neither', 'left' or 'right'" + ) return expected_range @@ -591,12 +584,29 @@ def test_range_closed(self, freq, tz, inclusive_endpoints_fixture): begin = Timestamp("2011/1/1", tz=tz) end = Timestamp("2014/1/1", tz=tz) + result_range = date_range( + begin, end, inclusive=inclusive_endpoints_fixture, freq=freq + ) + both_range = date_range(begin, end, inclusive="both", freq=freq) + expected_range = _get_expected_range(both_range, inclusive_endpoints_fixture) + + tm.assert_index_equal(expected_range, result_range) + + 
@pytest.mark.parametrize("freq", ["1D", "3D", "2ME", "7W", "3h", "YE"]) + def test_range_closed_with_tz_aware_start_end( + self, freq, inclusive_endpoints_fixture + ): + # GH12409, GH12684 + begin = Timestamp("2011/1/1", tz="US/Eastern") + end = Timestamp("2014/1/1", tz="US/Eastern") + result_range = date_range( begin, end, inclusive=inclusive_endpoints_fixture, freq=freq ) both_range = date_range(begin, end, inclusive="both", freq=freq) expected_range = _get_expected_range( - begin, end, both_range, inclusive_endpoints_fixture + both_range, + inclusive_endpoints_fixture, ) tm.assert_index_equal(expected_range, result_range) @@ -607,8 +617,8 @@ def test_range_with_tz_closed_with_tz_aware_start_end( ): begin = Timestamp("2011/1/1") end = Timestamp("2014/1/1") - begintz = Timestamp("2011/1/1", tz="US/Eastern") - endtz = Timestamp("2014/1/1", tz="US/Eastern") + # begintz = Timestamp("2011/1/1", tz="US/Eastern") + # endtz = Timestamp("2014/1/1", tz="US/Eastern") result_range = date_range( begin, @@ -621,8 +631,6 @@ def test_range_with_tz_closed_with_tz_aware_start_end( begin, end, inclusive="both", freq=freq, tz="US/Eastern" ) expected_range = _get_expected_range( - begintz, - endtz, both_range, inclusive_endpoints_fixture, ) @@ -656,18 +664,9 @@ def test_range_closed_boundary(self, inclusive_endpoints_fixture): inclusive=inclusive_endpoints_fixture, ) - expected_right = both_boundary - expected_left = both_boundary - expected_both = both_boundary - - if inclusive_endpoints_fixture == "right": - expected_left = both_boundary[1:] - elif inclusive_endpoints_fixture == "left": - expected_right = both_boundary[:-1] - elif inclusive_endpoints_fixture == "both": - expected_right = both_boundary[1:] - expected_left = both_boundary[:-1] - + expected_right = both_boundary[1:] + expected_left = both_boundary[:-1] + expected_both = both_boundary[:] expected_neither = both_boundary[1:-1] tm.assert_index_equal(right_boundary, expected_right) @@ -748,10 +747,7 @@ def test_range_where_start_equal_end(self, inclusive_endpoints_fixture): ) both_range = date_range(start=start, end=end, freq="D", inclusive="both") - if inclusive_endpoints_fixture == "neither": - expected = both_range[1:-1] - elif inclusive_endpoints_fixture in ("left", "right", "both"): - expected = both_range[:] + expected = _get_expected_range(both_range, inclusive_endpoints_fixture) tm.assert_index_equal(result, expected) @@ -1102,7 +1098,7 @@ def test_bdays_and_open_boundaries(self, inclusive): bday_start = "2018-07-23" # Monday bday_end = "2018-07-27" # Friday - expected = date_range(bday_start, bday_end, freq="D") + expected = date_range(bday_start, bday_end, freq="D", inclusive=inclusive) tm.assert_index_equal(result, expected) # Note: we do _not_ expect the freqs to match here
- [X] closes #55293 - [X] closes #46331 - [X] Added an entry in the latest `doc/source/whatsnew/v2.1.3.rst` file if fixing a bug or adding a new feature. This PR fixes the issues with `inclusive` in `date_range`. It removes the problematic expression, which seems to be `i8values[] == start/end_i8`; also, the `and` in line 498 should have been an `or`. After those fixes the if-statement was overly complicated, so I simplified it.
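The resulting trimming rule is simple to state on its own. Below is a minimal sketch of the simplified logic; the helper name is made up for illustration, mirroring the `_generate_range` change, and it operates on a range generated with both endpoints included:

```python
import pandas as pd

# Hypothetical helper mirroring the simplified endpoint trimming.
def trim_endpoints(both_range: pd.DatetimeIndex, inclusive: str) -> pd.DatetimeIndex:
    if inclusive not in ("both", "neither", "left", "right"):
        raise ValueError("inclusive has to be 'both', 'neither', 'left' or 'right'")
    if inclusive in ("right", "neither"):
        both_range = both_range[1:]   # drop the start endpoint
    if inclusive in ("left", "neither"):
        both_range = both_range[:-1]  # drop the end endpoint
    return both_range

rng = pd.date_range("2011-01-01", "2011-01-05", freq="D", inclusive="both")
trim_endpoints(rng, "neither")  # 2011-01-02 through 2011-01-04
```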
https://api.github.com/repos/pandas-dev/pandas/pulls/55319
2023-09-28T21:34:01Z
2024-04-15T17:28:33Z
null
2024-04-15T17:28:34Z
ENH: propagating attrs always uses deepcopy
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 7a177344a42c7..9a524db89de85 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -74,12 +74,12 @@ enhancement2 Other enhancements ^^^^^^^^^^^^^^^^^^ +- :attr:`Series.attrs` / :attr:`DataFrame.attrs` now uses a deepcopy for propagating ``attrs`` (:issue:`54134`). - :func:`read_csv` now supports ``on_bad_lines`` parameter with ``engine="pyarrow"``. (:issue:`54480`) - :meth:`ExtensionArray._explode` interface method added to allow extension type implementations of the ``explode`` method (:issue:`54833`) - :meth:`ExtensionArray.duplicated` added to allow extension type implementations of the ``duplicated`` method (:issue:`55255`) - DataFrame.apply now allows the usage of numba (via ``engine="numba"``) to JIT compile the passed function, allowing for potential speedups (:issue:`54666`) - Implement masked algorithms for :meth:`Series.value_counts` (:issue:`54984`) -- .. --------------------------------------------------------------------------- .. _whatsnew_220.notable_bug_fixes: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 738f4cbe6bc43..2e6c8919eff38 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2,6 +2,7 @@ from __future__ import annotations import collections +from copy import deepcopy import datetime as dt from functools import partial import gc @@ -368,6 +369,13 @@ def attrs(self) -> dict[Hashable, Any]: -------- DataFrame.flags : Global flags applying to this object. + Notes + ----- + Many operations that create new datasets will copy ``attrs``. Copies + are always deep so that changing ``attrs`` will only affect the + present dataset. ``pandas.concat`` copies ``attrs`` only if all input + datasets have the same ``attrs``. + Examples -------- For Series: @@ -6191,8 +6199,12 @@ def __finalize__(self, other, method: str | None = None, **kwargs) -> Self: stable across pandas releases. """ if isinstance(other, NDFrame): - for name in other.attrs: - self.attrs[name] = other.attrs[name] + if other.attrs: + # We want attrs propagation to have minimal performance + # impact if attrs are not used; i.e. attrs is an empty dict. + # One could make the deepcopy unconditionally, but a deepcopy + # of an empty dict is 50x more expensive than the empty check. + self.attrs = deepcopy(other.attrs) self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. 
@@ -6201,11 +6213,13 @@ def __finalize__(self, other, method: str | None = None, **kwargs) -> Self: object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": - attrs = other.objs[0].attrs - check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) - if check_attrs: - for name in attrs: - self.attrs[name] = attrs[name] + # propagate attrs only if all concat arguments have the same attrs + if all(bool(obj.attrs) for obj in other.objs): + # all concatenate arguments have non-empty attrs + attrs = other.objs[0].attrs + have_same_attrs = all(obj.attrs == attrs for obj in other.objs[1:]) + if have_same_attrs: + self.attrs = deepcopy(attrs) allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 8fc78629beb0a..06bf169bf4dbc 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -315,6 +315,15 @@ def test_attrs(self): result = df.rename(columns=str) assert result.attrs == {"version": 1} + def test_attrs_deepcopy(self): + df = DataFrame({"A": [2, 3]}) + assert df.attrs == {} + df.attrs["tags"] = {"spam", "ham"} + + result = df.rename(columns=str) + assert result.attrs == df.attrs + assert result.attrs["tags"] is not df.attrs["tags"] + @pytest.mark.parametrize("allows_duplicate_labels", [True, False, None]) def test_set_flags( self, allows_duplicate_labels, frame_or_series, using_copy_on_write
Always using a deepcopy prevents shared state and thus unintentional modification of the attrs of other objects. IMHO this safety takes priority over the slight performance cost of the deepcopy. The implementation now skips the copying if *attrs* are not used (i.e. attrs is an empty dict). This check takes only ~20ns, so the attrs copy mechanism has no performance impact when attrs are not used. - [x] Closes #54134 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
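With this change, mutating a container stored in propagated `attrs` no longer leaks back to the source object. A small sketch, mirroring the test added in the diff:

```python
import pandas as pd

df = pd.DataFrame({"A": [2, 3]})
df.attrs["tags"] = {"spam", "ham"}

result = df.rename(columns=str)        # __finalize__ now deep-copies attrs
assert result.attrs == df.attrs
result.attrs["tags"].add("eggs")       # mutate the propagated copy...
assert "eggs" not in df.attrs["tags"]  # ...the original is untouched
```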
https://api.github.com/repos/pandas-dev/pandas/pulls/55314
2023-09-28T10:16:26Z
2023-10-04T16:38:17Z
2023-10-04T16:38:17Z
2023-10-20T22:27:58Z
TYP: overload for DataFrame.to_xml
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 445b93705cde5..6ddb1613076ac 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -218,6 +218,7 @@ Other Deprecations - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_parquet` except ``path``. (:issue:`54229`) - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_pickle` except ``path``. (:issue:`54229`) - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_string` except ``buf``. (:issue:`54229`) +- Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_xml` except ``path_or_buffer``. (:issue:`54229`) - Deprecated automatic downcasting of object-dtype results in :meth:`Series.replace` and :meth:`DataFrame.replace`, explicitly call ``result = result.infer_objects(copy=False)`` instead. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`54710`) - Deprecated downcasting behavior in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, :meth:`DataFrame.mask`, :meth:`Series.clip`, :meth:`DataFrame.clip`; in a future version these will not infer object-dtype columns to non-object dtype, or all-round floats to integer dtype. Call ``result.infer_objects(copy=False)`` on the result for object inference, or explicitly cast floats to ints. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`53656`) - Deprecated including the groups in computations when using :meth:`DataFrameGroupBy.apply` and :meth:`DataFrameGroupBy.resample`; pass ``include_groups=False`` to exclude the groups (:issue:`7155`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 432c0a745c7a0..a16597221ac92 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3272,6 +3272,55 @@ def to_html( render_links=render_links, ) + @overload + def to_xml( + self, + path_or_buffer: None = ..., + *, + index: bool = ..., + root_name: str | None = ..., + row_name: str | None = ..., + na_rep: str | None = ..., + attr_cols: list[str] | None = ..., + elem_cols: list[str] | None = ..., + namespaces: dict[str | None, str] | None = ..., + prefix: str | None = ..., + encoding: str = ..., + xml_declaration: bool | None = ..., + pretty_print: bool | None = ..., + parser: XMLParsers | None = ..., + stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = ..., + compression: CompressionOptions = ..., + storage_options: StorageOptions | None = ..., + ) -> str: + ... + + @overload + def to_xml( + self, + path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str], + *, + index: bool = ..., + root_name: str | None = ..., + row_name: str | None = ..., + na_rep: str | None = ..., + attr_cols: list[str] | None = ..., + elem_cols: list[str] | None = ..., + namespaces: dict[str | None, str] | None = ..., + prefix: str | None = ..., + encoding: str = ..., + xml_declaration: bool | None = ..., + pretty_print: bool | None = ..., + parser: XMLParsers | None = ..., + stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = ..., + compression: CompressionOptions = ..., + storage_options: StorageOptions | None = ..., + ) -> None: + ... + + @deprecate_nonkeyword_arguments( + version="3.0", allowed_args=["self", "path_or_buffer"], name="to_xml" + ) @doc( storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer",
Adds overloads for `DataFrame.to_xml` and deprecates non-keyword arguments except `path_or_buffer`.
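The overload pattern in the diff ties the return type to whether a target is passed: `None` means "return the XML as a string", while a path or buffer means "write and return `None`". A minimal standalone sketch of the same pattern (a toy `to_xml`, not the pandas method itself):

```python
from __future__ import annotations

from typing import overload

# A toy stand-in for the pattern, not the pandas method.
@overload
def to_xml(path_or_buffer: None = ...) -> str: ...
@overload
def to_xml(path_or_buffer: str) -> None: ...

def to_xml(path_or_buffer: str | None = None) -> str | None:
    xml = "<data/>"
    if path_or_buffer is None:
        return xml  # no target: hand back the XML as a string
    with open(path_or_buffer, "w") as f:
        f.write(xml)  # target given: write to it and return None
    return None
```

With these overloads, a type checker infers `str` for `df.to_xml()` and `None` for `df.to_xml("out.xml")`, instead of `str | None` for both.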
https://api.github.com/repos/pandas-dev/pandas/pulls/55313
2023-09-28T01:26:31Z
2023-09-28T16:16:11Z
2023-09-28T16:16:11Z
2023-09-28T16:16:23Z
REF: Add tests.groupby.methods
diff --git a/pandas/tests/groupby/methods/__init__.py b/pandas/tests/groupby/methods/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/groupby/methods/test_corrwith.py b/pandas/tests/groupby/methods/test_corrwith.py new file mode 100644 index 0000000000000..53e8bdc4534dc --- /dev/null +++ b/pandas/tests/groupby/methods/test_corrwith.py @@ -0,0 +1,24 @@ +import numpy as np + +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm + + +def test_corrwith_with_1_axis(): + # GH 47723 + df = DataFrame({"a": [1, 1, 2], "b": [3, 7, 4]}) + gb = df.groupby("a") + + msg = "DataFrameGroupBy.corrwith with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.corrwith(df, axis=1) + index = Index( + data=[(1, 0), (1, 1), (1, 2), (2, 2), (2, 0), (2, 1)], + name=("a", None), + ) + expected = Series([np.nan] * 6, index=index) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/methods/test_describe.py b/pandas/tests/groupby/methods/test_describe.py new file mode 100644 index 0000000000000..f38de8faddb59 --- /dev/null +++ b/pandas/tests/groupby/methods/test_describe.py @@ -0,0 +1,221 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Timestamp, +) +import pandas._testing as tm + + +def test_apply_describe_bug(mframe): + grouped = mframe.groupby(level="first") + grouped.describe() # it works! + + +def test_series_describe_multikey(): + ts = tm.makeTimeSeries() + grouped = ts.groupby([lambda x: x.year, lambda x: x.month]) + result = grouped.describe() + tm.assert_series_equal(result["mean"], grouped.mean(), check_names=False) + tm.assert_series_equal(result["std"], grouped.std(), check_names=False) + tm.assert_series_equal(result["min"], grouped.min(), check_names=False) + + +def test_series_describe_single(): + ts = tm.makeTimeSeries() + grouped = ts.groupby(lambda x: x.month) + result = grouped.apply(lambda x: x.describe()) + expected = grouped.describe().stack(future_stack=True) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("keys", ["key1", ["key1", "key2"]]) +def test_series_describe_as_index(as_index, keys): + # GH#49256 + df = DataFrame( + { + "key1": ["one", "two", "two", "three", "two"], + "key2": ["one", "two", "two", "three", "two"], + "foo2": [1, 2, 4, 4, 6], + } + ) + gb = df.groupby(keys, as_index=as_index)["foo2"] + result = gb.describe() + expected = DataFrame( + { + "key1": ["one", "three", "two"], + "count": [1.0, 1.0, 3.0], + "mean": [1.0, 4.0, 4.0], + "std": [np.nan, np.nan, 2.0], + "min": [1.0, 4.0, 2.0], + "25%": [1.0, 4.0, 3.0], + "50%": [1.0, 4.0, 4.0], + "75%": [1.0, 4.0, 5.0], + "max": [1.0, 4.0, 6.0], + } + ) + if len(keys) == 2: + expected.insert(1, "key2", expected["key1"]) + if as_index: + expected = expected.set_index(keys) + tm.assert_frame_equal(result, expected) + + +def test_frame_describe_multikey(tsframe): + grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month]) + result = grouped.describe() + desc_groups = [] + for col in tsframe: + group = grouped[col].describe() + # GH 17464 - Remove duplicate MultiIndex levels + group_col = MultiIndex( + levels=[[col], group.columns], + codes=[[0] * len(group.columns), range(len(group.columns))], + ) + group = DataFrame(group.values, columns=group_col, index=group.index) + desc_groups.append(group) + expected = pd.concat(desc_groups, axis=1) + tm.assert_frame_equal(result, expected) + + msg = 
"DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1) + result = groupedT.describe() + expected = tsframe.describe().T + # reverting the change from https://github.com/pandas-dev/pandas/pull/35441/ + expected.index = MultiIndex( + levels=[[0, 1], expected.index], + codes=[[0, 0, 1, 1], range(len(expected.index))], + ) + tm.assert_frame_equal(result, expected) + + +def test_frame_describe_tupleindex(): + # GH 14848 - regression from 0.19.0 to 0.19.1 + df1 = DataFrame( + { + "x": [1, 2, 3, 4, 5] * 3, + "y": [10, 20, 30, 40, 50] * 3, + "z": [100, 200, 300, 400, 500] * 3, + } + ) + df1["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5 + df2 = df1.rename(columns={"k": "key"}) + msg = "Names should be list-like for a MultiIndex" + with pytest.raises(ValueError, match=msg): + df1.groupby("k").describe() + with pytest.raises(ValueError, match=msg): + df2.groupby("key").describe() + + +def test_frame_describe_unstacked_format(): + # GH 4792 + prices = { + Timestamp("2011-01-06 10:59:05", tz=None): 24990, + Timestamp("2011-01-06 12:43:33", tz=None): 25499, + Timestamp("2011-01-06 12:54:09", tz=None): 25499, + } + volumes = { + Timestamp("2011-01-06 10:59:05", tz=None): 1500000000, + Timestamp("2011-01-06 12:43:33", tz=None): 5000000000, + Timestamp("2011-01-06 12:54:09", tz=None): 100000000, + } + df = DataFrame({"PRICE": prices, "VOLUME": volumes}) + result = df.groupby("PRICE").VOLUME.describe() + data = [ + df[df.PRICE == 24990].VOLUME.describe().values.tolist(), + df[df.PRICE == 25499].VOLUME.describe().values.tolist(), + ] + expected = DataFrame( + data, + index=Index([24990, 25499], name="PRICE"), + columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings( + "ignore:" + "indexing past lexsort depth may impact performance:" + "pandas.errors.PerformanceWarning" +) +@pytest.mark.parametrize("as_index", [True, False]) +@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]]) +def test_describe_with_duplicate_output_column_names(as_index, keys): + # GH 35314 + df = DataFrame( + { + "a1": [99, 99, 99, 88, 88, 88], + "a2": [99, 99, 99, 88, 88, 88], + "b": [1, 2, 3, 4, 5, 6], + "c": [10, 20, 30, 40, 50, 60], + }, + columns=["a1", "a2", "b", "b"], + copy=False, + ) + if keys == ["a1"]: + df = df.drop(columns="a2") + + expected = ( + DataFrame.from_records( + [ + ("b", "count", 3.0, 3.0), + ("b", "mean", 5.0, 2.0), + ("b", "std", 1.0, 1.0), + ("b", "min", 4.0, 1.0), + ("b", "25%", 4.5, 1.5), + ("b", "50%", 5.0, 2.0), + ("b", "75%", 5.5, 2.5), + ("b", "max", 6.0, 3.0), + ("b", "count", 3.0, 3.0), + ("b", "mean", 5.0, 2.0), + ("b", "std", 1.0, 1.0), + ("b", "min", 4.0, 1.0), + ("b", "25%", 4.5, 1.5), + ("b", "50%", 5.0, 2.0), + ("b", "75%", 5.5, 2.5), + ("b", "max", 6.0, 3.0), + ], + ) + .set_index([0, 1]) + .T + ) + expected.columns.names = [None, None] + if len(keys) == 2: + expected.index = MultiIndex( + levels=[[88, 99], [88, 99]], codes=[[0, 1], [0, 1]], names=["a1", "a2"] + ) + else: + expected.index = Index([88, 99], name="a1") + + if not as_index: + expected = expected.reset_index() + + result = df.groupby(keys, as_index=as_index).describe() + + tm.assert_frame_equal(result, expected) + + +def test_describe_duplicate_columns(): + # GH#50806 + df = DataFrame([[0, 1, 2, 3]]) + df.columns = [0, 1, 2, 0] + gb = df.groupby(df[1]) + result = gb.describe(percentiles=[]) + + columns = ["count", 
"mean", "std", "min", "50%", "max"] + frames = [ + DataFrame([[1.0, val, np.nan, val, val, val]], index=[1], columns=columns) + for val in (0.0, 2.0, 3.0) + ] + expected = pd.concat(frames, axis=1) + expected.columns = MultiIndex( + levels=[[0, 2], columns], + codes=[6 * [0] + 6 * [1] + 6 * [0], 3 * list(range(6))], + ) + expected.index.names = [1] + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_groupby_shift_diff.py b/pandas/tests/groupby/methods/test_groupby_shift_diff.py similarity index 100% rename from pandas/tests/groupby/test_groupby_shift_diff.py rename to pandas/tests/groupby/methods/test_groupby_shift_diff.py diff --git a/pandas/tests/groupby/methods/test_is_monotonic.py b/pandas/tests/groupby/methods/test_is_monotonic.py new file mode 100644 index 0000000000000..3428fc90f6e51 --- /dev/null +++ b/pandas/tests/groupby/methods/test_is_monotonic.py @@ -0,0 +1,78 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "in_vals, out_vals", + [ + # Basics: strictly increasing (T), strictly decreasing (F), + # abs val increasing (F), non-strictly increasing (T) + ([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]), + # Test with inf vals + ( + [1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf], + [True, False, True, False], + ), + # Test with nan vals; should always be False + ( + [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan], + [False, False, False, False], + ), + ], +) +def test_is_monotonic_increasing(in_vals, out_vals): + # GH 17015 + source_dict = { + "A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"], + "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"], + "C": in_vals, + } + df = DataFrame(source_dict) + result = df.groupby("B").C.is_monotonic_increasing + index = Index(list("abcd"), name="B") + expected = Series(index=index, data=out_vals, name="C") + tm.assert_series_equal(result, expected) + + # Also check result equal to manually taking x.is_monotonic_increasing. 
+ expected = df.groupby(["B"]).C.apply(lambda x: x.is_monotonic_increasing) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "in_vals, out_vals", + [ + # Basics: strictly decreasing (T), strictly increasing (F), + # abs val decreasing (F), non-strictly increasing (T) + ([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]), + # Test with inf vals + ( + [np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf], + [True, True, False, True], + ), + # Test with nan vals; should always be False + ( + [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan], + [False, False, False, False], + ), + ], +) +def test_is_monotonic_decreasing(in_vals, out_vals): + # GH 17015 + source_dict = { + "A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"], + "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"], + "C": in_vals, + } + + df = DataFrame(source_dict) + result = df.groupby("B").C.is_monotonic_decreasing + index = Index(list("abcd"), name="B") + expected = Series(index=index, data=out_vals, name="C") + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/methods/test_nlargest_nsmallest.py b/pandas/tests/groupby/methods/test_nlargest_nsmallest.py new file mode 100644 index 0000000000000..bf983f04a3f3f --- /dev/null +++ b/pandas/tests/groupby/methods/test_nlargest_nsmallest.py @@ -0,0 +1,115 @@ +import numpy as np +import pytest + +from pandas import ( + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm + + +def test_nlargest(): + a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) + b = Series(list("a" * 5 + "b" * 5)) + gb = a.groupby(b) + r = gb.nlargest(3) + e = Series( + [7, 5, 3, 10, 9, 6], + index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]), + ) + tm.assert_series_equal(r, e) + + a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0]) + gb = a.groupby(b) + e = Series( + [3, 2, 1, 3, 3, 2], + index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]), + ) + tm.assert_series_equal(gb.nlargest(3, keep="last"), e) + + +def test_nlargest_mi_grouper(): + # see gh-21411 + npr = np.random.default_rng(2) + + dts = date_range("20180101", periods=10) + iterables = [dts, ["one", "two"]] + + idx = MultiIndex.from_product(iterables, names=["first", "second"]) + s = Series(npr.standard_normal(20), index=idx) + + result = s.groupby("first").nlargest(1) + + exp_idx = MultiIndex.from_tuples( + [ + (dts[0], dts[0], "one"), + (dts[1], dts[1], "one"), + (dts[2], dts[2], "one"), + (dts[3], dts[3], "two"), + (dts[4], dts[4], "one"), + (dts[5], dts[5], "one"), + (dts[6], dts[6], "one"), + (dts[7], dts[7], "one"), + (dts[8], dts[8], "one"), + (dts[9], dts[9], "one"), + ], + names=["first", "first", "second"], + ) + + exp_values = [ + 0.18905338179353307, + -0.41306354339189344, + 1.799707382720902, + 0.7738065867276614, + 0.28121066979764925, + 0.9775674511260357, + -0.3288239040579627, + 0.45495807124085547, + 0.5452887139646817, + 0.12682784711186987, + ] + + expected = Series(exp_values, index=exp_idx) + tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3) + + +def test_nsmallest(): + a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) + b = Series(list("a" * 5 + "b" * 5)) + gb = a.groupby(b) + r = gb.nsmallest(3) + e = Series( + [1, 2, 3, 0, 4, 6], + index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]), + ) + tm.assert_series_equal(r, e) + + a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0]) + gb = a.groupby(b) + e = Series( + [0, 1, 1, 0, 1, 2], + 
index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]), + ) + tm.assert_series_equal(gb.nsmallest(3, keep="last"), e) + + +@pytest.mark.parametrize( + "data, groups", + [([0, 1, 2, 3], [0, 0, 1, 1]), ([0], [0])], +) +@pytest.mark.parametrize("dtype", [None, *tm.ALL_INT_NUMPY_DTYPES]) +@pytest.mark.parametrize("method", ["nlargest", "nsmallest"]) +def test_nlargest_and_smallest_noop(data, groups, dtype, method): + # GH 15272, GH 16345, GH 29129 + # Test nlargest/smallest when it results in a noop, + # i.e. input is sorted and group size <= n + if dtype is not None: + data = np.array(data, dtype=dtype) + if method == "nlargest": + data = list(reversed(data)) + ser = Series(data, name="a") + result = getattr(ser.groupby(groups), method)(n=2) + expidx = np.array(groups, dtype=int) if isinstance(groups, list) else groups + expected = Series(data, index=MultiIndex.from_arrays([expidx, ser.index]), name="a") + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/methods/test_nth.py similarity index 100% rename from pandas/tests/groupby/test_nth.py rename to pandas/tests/groupby/methods/test_nth.py diff --git a/pandas/tests/groupby/test_quantile.py b/pandas/tests/groupby/methods/test_quantile.py similarity index 100% rename from pandas/tests/groupby/test_quantile.py rename to pandas/tests/groupby/methods/test_quantile.py diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/methods/test_rank.py similarity index 100% rename from pandas/tests/groupby/test_rank.py rename to pandas/tests/groupby/methods/test_rank.py diff --git a/pandas/tests/groupby/test_sample.py b/pandas/tests/groupby/methods/test_sample.py similarity index 100% rename from pandas/tests/groupby/test_sample.py rename to pandas/tests/groupby/methods/test_sample.py diff --git a/pandas/tests/groupby/test_size.py b/pandas/tests/groupby/methods/test_size.py similarity index 100% rename from pandas/tests/groupby/test_size.py rename to pandas/tests/groupby/methods/test_size.py diff --git a/pandas/tests/groupby/test_skew.py b/pandas/tests/groupby/methods/test_skew.py similarity index 100% rename from pandas/tests/groupby/test_skew.py rename to pandas/tests/groupby/methods/test_skew.py diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/methods/test_value_counts.py similarity index 100% rename from pandas/tests/groupby/test_value_counts.py rename to pandas/tests/groupby/methods/test_value_counts.py diff --git a/pandas/tests/groupby/test_any_all.py b/pandas/tests/groupby/test_any_all.py deleted file mode 100644 index 57a83335be849..0000000000000 --- a/pandas/tests/groupby/test_any_all.py +++ /dev/null @@ -1,188 +0,0 @@ -import builtins - -import numpy as np -import pytest - -import pandas as pd -from pandas import ( - DataFrame, - Index, - Series, - isna, -) -import pandas._testing as tm - - -@pytest.mark.parametrize("agg_func", ["any", "all"]) -@pytest.mark.parametrize( - "vals", - [ - ["foo", "bar", "baz"], - ["foo", "", ""], - ["", "", ""], - [1, 2, 3], - [1, 0, 0], - [0, 0, 0], - [1.0, 2.0, 3.0], - [1.0, 0.0, 0.0], - [0.0, 0.0, 0.0], - [True, True, True], - [True, False, False], - [False, False, False], - [np.nan, np.nan, np.nan], - ], -) -def test_groupby_bool_aggs(skipna, agg_func, vals): - df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2}) - - # Figure out expectation using Python builtin - exp = getattr(builtins, agg_func)(vals) - - # edge case for missing data with skipna and 'any' - if skipna and all(isna(vals)) 
and agg_func == "any": - exp = False - - expected = DataFrame( - [exp] * 2, columns=["val"], index=Index(["a", "b"], name="key") - ) - result = getattr(df.groupby("key"), agg_func)(skipna=skipna) - tm.assert_frame_equal(result, expected) - - -def test_any(): - df = DataFrame( - [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], - columns=["A", "B", "C"], - ) - expected = DataFrame( - [[True, True], [False, True]], columns=["B", "C"], index=[1, 3] - ) - expected.index.name = "A" - result = df.groupby("A").any() - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) -def test_bool_aggs_dup_column_labels(bool_agg_func): - # GH#21668 - df = DataFrame([[True, True]], columns=["a", "a"]) - grp_by = df.groupby([0]) - result = getattr(grp_by, bool_agg_func)() - - expected = df.set_axis(np.array([0])) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) -@pytest.mark.parametrize( - "data", - [ - [False, False, False], - [True, True, True], - [pd.NA, pd.NA, pd.NA], - [False, pd.NA, False], - [True, pd.NA, True], - [True, pd.NA, False], - ], -) -def test_masked_kleene_logic(bool_agg_func, skipna, data): - # GH#37506 - ser = Series(data, dtype="boolean") - - # The result should match aggregating on the whole series. Correctness - # there is verified in test_reductions.py::test_any_all_boolean_kleene_logic - expected_data = getattr(ser, bool_agg_func)(skipna=skipna) - expected = Series(expected_data, index=np.array([0]), dtype="boolean") - - result = ser.groupby([0, 0, 0]).agg(bool_agg_func, skipna=skipna) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize( - "dtype1,dtype2,exp_col1,exp_col2", - [ - ( - "float", - "Float64", - np.array([True], dtype=bool), - pd.array([pd.NA], dtype="boolean"), - ), - ( - "Int64", - "float", - pd.array([pd.NA], dtype="boolean"), - np.array([True], dtype=bool), - ), - ( - "Int64", - "Int64", - pd.array([pd.NA], dtype="boolean"), - pd.array([pd.NA], dtype="boolean"), - ), - ( - "Float64", - "boolean", - pd.array([pd.NA], dtype="boolean"), - pd.array([pd.NA], dtype="boolean"), - ), - ], -) -def test_masked_mixed_types(dtype1, dtype2, exp_col1, exp_col2): - # GH#37506 - data = [1.0, np.nan] - df = DataFrame( - {"col1": pd.array(data, dtype=dtype1), "col2": pd.array(data, dtype=dtype2)} - ) - result = df.groupby([1, 1]).agg("all", skipna=False) - - expected = DataFrame({"col1": exp_col1, "col2": exp_col2}, index=np.array([1])) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) -@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"]) -def test_masked_bool_aggs_skipna(bool_agg_func, dtype, skipna, frame_or_series): - # GH#40585 - obj = frame_or_series([pd.NA, 1], dtype=dtype) - expected_res = True - if not skipna and bool_agg_func == "all": - expected_res = pd.NA - expected = frame_or_series([expected_res], index=np.array([1]), dtype="boolean") - - result = obj.groupby([1, 1]).agg(bool_agg_func, skipna=skipna) - tm.assert_equal(result, expected) - - -@pytest.mark.parametrize( - "bool_agg_func,data,expected_res", - [ - ("any", [pd.NA, np.nan], False), - ("any", [pd.NA, 1, np.nan], True), - ("all", [pd.NA, pd.NaT], True), - ("all", [pd.NA, False, pd.NaT], False), - ], -) -def test_object_type_missing_vals(bool_agg_func, data, expected_res, frame_or_series): - # GH#37501 - obj = frame_or_series(data, dtype=object) - result = obj.groupby([1] * len(data)).agg(bool_agg_func) - expected = 
frame_or_series([expected_res], index=np.array([1]), dtype="bool") - tm.assert_equal(result, expected) - - -@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) -def test_object_NA_raises_with_skipna_false(bool_agg_func): - # GH#37501 - ser = Series([pd.NA], dtype=object) - with pytest.raises(TypeError, match="boolean value of NA is ambiguous"): - ser.groupby([1]).agg(bool_agg_func, skipna=False) - - -@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) -def test_empty(frame_or_series, bool_agg_func): - # GH 45231 - kwargs = {"columns": ["a"]} if frame_or_series is DataFrame else {"name": "a"} - obj = frame_or_series(**kwargs, dtype=object) - result = getattr(obj.groupby(obj.index), bool_agg_func)() - expected = frame_or_series(**kwargs, dtype=bool) - tm.assert_equal(result, expected) diff --git a/pandas/tests/groupby/test_cumulative.py b/pandas/tests/groupby/test_cumulative.py new file mode 100644 index 0000000000000..eecb82cd5050b --- /dev/null +++ b/pandas/tests/groupby/test_cumulative.py @@ -0,0 +1,291 @@ +import numpy as np +import pytest + +from pandas.errors import UnsupportedFunctionCall +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +@pytest.fixture( + params=[np.int32, np.int64, np.float32, np.float64, "Int64", "Float64"], + ids=["np.int32", "np.int64", "np.float32", "np.float64", "Int64", "Float64"], +) +def dtypes_for_minmax(request): + """ + Fixture of dtypes with min and max values used for testing + cummin and cummax + """ + dtype = request.param + + np_type = dtype + if dtype == "Int64": + np_type = np.int64 + elif dtype == "Float64": + np_type = np.float64 + + min_val = ( + np.iinfo(np_type).min + if np.dtype(np_type).kind == "i" + else np.finfo(np_type).min + ) + max_val = ( + np.iinfo(np_type).max + if np.dtype(np_type).kind == "i" + else np.finfo(np_type).max + ) + + return (dtype, min_val, max_val) + + +def test_groupby_cumprod(): + # GH 4095 + df = DataFrame({"key": ["b"] * 10, "value": 2}) + + actual = df.groupby("key")["value"].cumprod() + expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod()) + expected.name = "value" + tm.assert_series_equal(actual, expected) + + df = DataFrame({"key": ["b"] * 100, "value": 2}) + df["value"] = df["value"].astype(float) + actual = df.groupby("key")["value"].cumprod() + expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod()) + expected.name = "value" + tm.assert_series_equal(actual, expected) + + +def test_groupby_cumprod_overflow(): + # GH#37493 if we overflow we return garbage consistent with numpy + df = DataFrame({"key": ["b"] * 4, "value": 100_000}) + actual = df.groupby("key")["value"].cumprod() + expected = Series( + [100_000, 10_000_000_000, 1_000_000_000_000_000, 7766279631452241920], + name="value", + ) + tm.assert_series_equal(actual, expected) + + numpy_result = df.groupby("key", group_keys=False)["value"].apply( + lambda x: x.cumprod() + ) + numpy_result.name = "value" + tm.assert_series_equal(actual, numpy_result) + + +def test_groupby_cumprod_nan_influences_other_columns(): + # GH#48064 + df = DataFrame( + { + "a": 1, + "b": [1, np.nan, 2], + "c": [1, 2, 3.0], + } + ) + result = df.groupby("a").cumprod(numeric_only=True, skipna=False) + expected = DataFrame({"b": [1, np.nan, np.nan], "c": [1, 2, 6.0]}) + tm.assert_frame_equal(result, expected) + + +def test_cummin(dtypes_for_minmax): + dtype = dtypes_for_minmax[0] + min_val = dtypes_for_minmax[1] + + 
# GH 15048 + base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}) + expected_mins = [3, 3, 3, 2, 2, 2, 2, 1] + + df = base_df.astype(dtype) + + expected = DataFrame({"B": expected_mins}).astype(dtype) + result = df.groupby("A").cummin() + tm.assert_frame_equal(result, expected) + result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame() + tm.assert_frame_equal(result, expected) + + # Test w/ min value for dtype + df.loc[[2, 6], "B"] = min_val + df.loc[[1, 5], "B"] = min_val + 1 + expected.loc[[2, 3, 6, 7], "B"] = min_val + expected.loc[[1, 5], "B"] = min_val + 1 # should not be rounded to min_val + result = df.groupby("A").cummin() + tm.assert_frame_equal(result, expected, check_exact=True) + expected = ( + df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame() + ) + tm.assert_frame_equal(result, expected, check_exact=True) + + # Test nan in some values + # Explicit cast to float to avoid implicit cast when setting nan + base_df = base_df.astype({"B": "float"}) + base_df.loc[[0, 2, 4, 6], "B"] = np.nan + expected = DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]}) + result = base_df.groupby("A").cummin() + tm.assert_frame_equal(result, expected) + expected = ( + base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame() + ) + tm.assert_frame_equal(result, expected) + + # GH 15561 + df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])}) + expected = Series(pd.to_datetime("2001"), index=[0], name="b") + + result = df.groupby("a")["b"].cummin() + tm.assert_series_equal(expected, result) + + # GH 15635 + df = DataFrame({"a": [1, 2, 1], "b": [1, 2, 2]}) + result = df.groupby("a").b.cummin() + expected = Series([1, 2, 1], name="b") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["cummin", "cummax"]) +@pytest.mark.parametrize("dtype", ["UInt64", "Int64", "Float64", "float", "boolean"]) +def test_cummin_max_all_nan_column(method, dtype): + base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8}) + base_df["B"] = base_df["B"].astype(dtype) + grouped = base_df.groupby("A") + + expected = DataFrame({"B": [np.nan] * 8}, dtype=dtype) + result = getattr(grouped, method)() + tm.assert_frame_equal(expected, result) + + result = getattr(grouped["B"], method)().to_frame() + tm.assert_frame_equal(expected, result) + + +def test_cummax(dtypes_for_minmax): + dtype = dtypes_for_minmax[0] + max_val = dtypes_for_minmax[2] + + # GH 15048 + base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}) + expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3] + + df = base_df.astype(dtype) + + expected = DataFrame({"B": expected_maxs}).astype(dtype) + result = df.groupby("A").cummax() + tm.assert_frame_equal(result, expected) + result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame() + tm.assert_frame_equal(result, expected) + + # Test w/ max value for dtype + df.loc[[2, 6], "B"] = max_val + expected.loc[[2, 3, 6, 7], "B"] = max_val + result = df.groupby("A").cummax() + tm.assert_frame_equal(result, expected) + expected = ( + df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame() + ) + tm.assert_frame_equal(result, expected) + + # Test nan in some values + # Explicit cast to float to avoid implicit cast when setting nan + base_df = base_df.astype({"B": "float"}) + base_df.loc[[0, 2, 4, 6], "B"] = np.nan + expected = DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]}) + result = 
base_df.groupby("A").cummax() + tm.assert_frame_equal(result, expected) + expected = ( + base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame() + ) + tm.assert_frame_equal(result, expected) + + # GH 15561 + df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])}) + expected = Series(pd.to_datetime("2001"), index=[0], name="b") + + result = df.groupby("a")["b"].cummax() + tm.assert_series_equal(expected, result) + + # GH 15635 + df = DataFrame({"a": [1, 2, 1], "b": [2, 1, 1]}) + result = df.groupby("a").b.cummax() + expected = Series([2, 1, 2], name="b") + tm.assert_series_equal(result, expected) + + +def test_cummax_i8_at_implementation_bound(): + # the minimum value used to be treated as NPY_NAT+1 instead of NPY_NAT + # for int64 dtype GH#46382 + ser = Series([pd.NaT._value + n for n in range(5)]) + df = DataFrame({"A": 1, "B": ser, "C": ser.view("M8[ns]")}) + gb = df.groupby("A") + + res = gb.cummax() + exp = df[["B", "C"]] + tm.assert_frame_equal(res, exp) + + +@pytest.mark.parametrize("method", ["cummin", "cummax"]) +@pytest.mark.parametrize("dtype", ["float", "Int64", "Float64"]) +@pytest.mark.parametrize( + "groups,expected_data", + [ + ([1, 1, 1], [1, None, None]), + ([1, 2, 3], [1, None, 2]), + ([1, 3, 3], [1, None, None]), + ], +) +def test_cummin_max_skipna(method, dtype, groups, expected_data): + # GH-34047 + df = DataFrame({"a": Series([1, None, 2], dtype=dtype)}) + orig = df.copy() + gb = df.groupby(groups)["a"] + + result = getattr(gb, method)(skipna=False) + expected = Series(expected_data, dtype=dtype, name="a") + + # check we didn't accidentally alter df + tm.assert_frame_equal(df, orig) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["cummin", "cummax"]) +def test_cummin_max_skipna_multiple_cols(method): + # Ensure missing value in "a" doesn't cause "b" to be nan-filled + df = DataFrame({"a": [np.nan, 2.0, 2.0], "b": [2.0, 2.0, 2.0]}) + gb = df.groupby([1, 1, 1])[["a", "b"]] + + result = getattr(gb, method)(skipna=False) + expected = DataFrame({"a": [np.nan, np.nan, np.nan], "b": [2.0, 2.0, 2.0]}) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("func", ["cumprod", "cumsum"]) +def test_numpy_compat(func): + # see gh-12811 + df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]}) + g = df.groupby("A") + + msg = "numpy operations are not valid with groupby" + + with pytest.raises(UnsupportedFunctionCall, match=msg): + getattr(g, func)(1, 2, 3) + with pytest.raises(UnsupportedFunctionCall, match=msg): + getattr(g, func)(foo=1) + + +@td.skip_if_32bit +@pytest.mark.parametrize("method", ["cummin", "cummax"]) +@pytest.mark.parametrize( + "dtype,val", [("UInt64", np.iinfo("uint64").max), ("Int64", 2**53 + 1)] +) +def test_nullable_int_not_cast_as_float(method, dtype, val): + data = [val, pd.NA] + df = DataFrame({"grp": [1, 1], "b": data}, dtype=dtype) + grouped = df.groupby("grp") + + result = grouped.transform(method) + expected = DataFrame({"b": data}, dtype=dtype) + + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 08372541988d0..4876267c72f12 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1,12 +1,10 @@ import builtins -from io import StringIO import re import numpy as np import pytest from pandas._libs import lib -from pandas.errors import UnsupportedFunctionCall import pandas as pd from pandas import ( @@ -22,37 +20,6 @@ from pandas.util import 
_test_decorators as td -@pytest.fixture( - params=[np.int32, np.int64, np.float32, np.float64, "Int64", "Float64"], - ids=["np.int32", "np.int64", "np.float32", "np.float64", "Int64", "Float64"], -) -def dtypes_for_minmax(request): - """ - Fixture of dtypes with min and max values used for testing - cummin and cummax - """ - dtype = request.param - - np_type = dtype - if dtype == "Int64": - np_type = np.int64 - elif dtype == "Float64": - np_type = np.float64 - - min_val = ( - np.iinfo(np_type).min - if np.dtype(np_type).kind == "i" - else np.finfo(np_type).min - ) - max_val = ( - np.iinfo(np_type).max - if np.dtype(np_type).kind == "i" - else np.finfo(np_type).max - ) - - return (dtype, min_val, max_val) - - def test_intercept_builtin_sum(): s = Series([1.0, 2.0, np.nan, 3.0]) grouped = s.groupby([0, 1, 2, 2]) @@ -372,39 +339,6 @@ def test_cython_api2(): tm.assert_frame_equal(result, expected) -def test_cython_median(): - arr = np.random.default_rng(2).standard_normal(1000) - arr[::2] = np.nan - df = DataFrame(arr) - - labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float) - labels[::17] = np.nan - - result = df.groupby(labels).median() - msg = "using DataFrameGroupBy.median" - with tm.assert_produces_warning(FutureWarning, match=msg): - exp = df.groupby(labels).agg(np.nanmedian) - tm.assert_frame_equal(result, exp) - - df = DataFrame(np.random.default_rng(2).standard_normal((1000, 5))) - msg = "using DataFrameGroupBy.median" - with tm.assert_produces_warning(FutureWarning, match=msg): - rs = df.groupby(labels).agg(np.median) - xp = df.groupby(labels).median() - tm.assert_frame_equal(rs, xp) - - -def test_median_empty_bins(observed): - df = DataFrame(np.random.default_rng(2).integers(0, 44, 500)) - - grps = range(0, 55, 5) - bins = pd.cut(df[0], grps) - - result = df.groupby(bins, observed=observed).median() - expected = df.groupby(bins, observed=observed).agg(lambda x: x.median()) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( "dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"] ) @@ -478,105 +412,6 @@ def test_groupby_non_arithmetic_agg_int_like_precision(i): assert res.iloc[0].b == data["expected"] -@pytest.mark.parametrize( - "func, values", - [ - ("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}), - ("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}), - ], -) -@pytest.mark.parametrize("numeric_only", [True, False]) -def test_idxmin_idxmax_returns_int_types(func, values, numeric_only): - # GH 25444 - df = DataFrame( - { - "name": ["A", "A", "B", "B"], - "c_int": [1, 2, 3, 4], - "c_float": [4.02, 3.03, 2.04, 1.05], - "c_date": ["2019", "2018", "2016", "2017"], - } - ) - df["c_date"] = pd.to_datetime(df["c_date"]) - df["c_date_tz"] = df["c_date"].dt.tz_localize("US/Pacific") - df["c_timedelta"] = df["c_date"] - df["c_date"].iloc[0] - df["c_period"] = df["c_date"].dt.to_period("W") - df["c_Integer"] = df["c_int"].astype("Int64") - df["c_Floating"] = df["c_float"].astype("Float64") - - result = getattr(df.groupby("name"), func)(numeric_only=numeric_only) - - expected = DataFrame(values, index=Index(["A", "B"], name="name")) - if numeric_only: - expected = expected.drop(columns=["c_date"]) - else: - expected["c_date_tz"] = expected["c_date"] - expected["c_timedelta"] = expected["c_date"] - expected["c_period"] = expected["c_date"] - expected["c_Integer"] = expected["c_int"] - expected["c_Floating"] = expected["c_float"] - - tm.assert_frame_equal(result, expected) - - -def 
test_idxmin_idxmax_axis1(): - df = DataFrame( - np.random.default_rng(2).standard_normal((10, 4)), columns=["A", "B", "C", "D"] - ) - df["A"] = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4] - - gb = df.groupby("A") - - warn_msg = "DataFrameGroupBy.idxmax with axis=1 is deprecated" - with tm.assert_produces_warning(FutureWarning, match=warn_msg): - res = gb.idxmax(axis=1) - - alt = df.iloc[:, 1:].idxmax(axis=1) - indexer = res.index.get_level_values(1) - - tm.assert_series_equal(alt[indexer], res.droplevel("A")) - - df["E"] = date_range("2016-01-01", periods=10) - gb2 = df.groupby("A") - - msg = "'>' not supported between instances of 'Timestamp' and 'float'" - with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=warn_msg): - gb2.idxmax(axis=1) - - -@pytest.mark.parametrize( - "func, values, expected_values, warn", - [ - ("idxmin", [0, 1, 2], [0, 2], None), - ("idxmax", [0, 1, 2], [1, 2], None), - ("idxmin", [0, np.nan, 2], [np.nan, 2], FutureWarning), - ("idxmax", [0, np.nan, 2], [np.nan, 2], FutureWarning), - ("idxmin", [1, 0, np.nan], [1, np.nan], FutureWarning), - ("idxmax", [1, 0, np.nan], [0, np.nan], FutureWarning), - ], -) -@pytest.mark.parametrize("test_series", [True, False]) -def test_idxmin_idxmax_skipna_false(func, values, expected_values, warn, test_series): - # GH#54234 - df = DataFrame( - { - "a": [1, 1, 2], - "b": values, - } - ) - gb = df.groupby("a") - index = Index([1, 2], name="a") - expected = DataFrame({"b": expected_values}, index=index) - if test_series: - gb = gb["b"] - expected = expected["b"] - klass = "Series" if test_series else "DataFrame" - msg = f"The behavior of {klass}GroupBy.{func} with all-NA values" - with tm.assert_produces_warning(warn, match=msg): - result = getattr(gb, func)(skipna=False) - tm.assert_equal(result, expected) - - @pytest.mark.parametrize("numeric_only", [True, False, None]) def test_axis1_numeric_only(request, groupby_func, numeric_only): if groupby_func in ("idxmax", "idxmin"): @@ -658,54 +493,6 @@ def test_axis1_numeric_only(request, groupby_func, numeric_only): tm.assert_equal(result, expected) -def test_groupby_cumprod(): - # GH 4095 - df = DataFrame({"key": ["b"] * 10, "value": 2}) - - actual = df.groupby("key")["value"].cumprod() - expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod()) - expected.name = "value" - tm.assert_series_equal(actual, expected) - - df = DataFrame({"key": ["b"] * 100, "value": 2}) - df["value"] = df["value"].astype(float) - actual = df.groupby("key")["value"].cumprod() - expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod()) - expected.name = "value" - tm.assert_series_equal(actual, expected) - - -def test_groupby_cumprod_overflow(): - # GH#37493 if we overflow we return garbage consistent with numpy - df = DataFrame({"key": ["b"] * 4, "value": 100_000}) - actual = df.groupby("key")["value"].cumprod() - expected = Series( - [100_000, 10_000_000_000, 1_000_000_000_000_000, 7766279631452241920], - name="value", - ) - tm.assert_series_equal(actual, expected) - - numpy_result = df.groupby("key", group_keys=False)["value"].apply( - lambda x: x.cumprod() - ) - numpy_result.name = "value" - tm.assert_series_equal(actual, numpy_result) - - -def test_groupby_cumprod_nan_influences_other_columns(): - # GH#48064 - df = DataFrame( - { - "a": 1, - "b": [1, np.nan, 2], - "c": [1, 2, 3.0], - } - ) - result = df.groupby("a").cumprod(numeric_only=True, skipna=False) - expected = DataFrame({"b": [1, np.nan, np.nan], "c": [1, 2, 6.0]}) 
- tm.assert_frame_equal(result, expected) - - def scipy_sem(*args, **kwargs): from scipy.stats import sem @@ -741,627 +528,12 @@ def test_ops_general(op, targop): tm.assert_frame_equal(result, expected) -def test_max_nan_bug(): - raw = """,Date,app,File --04-23,2013-04-23 00:00:00,,log080001.log --05-06,2013-05-06 00:00:00,,log.log --05-07,2013-05-07 00:00:00,OE,xlsx""" - - with tm.assert_produces_warning(UserWarning, match="Could not infer format"): - df = pd.read_csv(StringIO(raw), parse_dates=[0]) - gb = df.groupby("Date") - r = gb[["File"]].max() - e = gb["File"].max().to_frame() - tm.assert_frame_equal(r, e) - assert not r["File"].isna().any() - - -def test_nlargest(): - a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) - b = Series(list("a" * 5 + "b" * 5)) - gb = a.groupby(b) - r = gb.nlargest(3) - e = Series( - [7, 5, 3, 10, 9, 6], - index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]), - ) - tm.assert_series_equal(r, e) - - a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0]) - gb = a.groupby(b) - e = Series( - [3, 2, 1, 3, 3, 2], - index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]), - ) - tm.assert_series_equal(gb.nlargest(3, keep="last"), e) - - -def test_nlargest_mi_grouper(): - # see gh-21411 - npr = np.random.default_rng(2) - - dts = date_range("20180101", periods=10) - iterables = [dts, ["one", "two"]] - - idx = MultiIndex.from_product(iterables, names=["first", "second"]) - s = Series(npr.standard_normal(20), index=idx) - - result = s.groupby("first").nlargest(1) - - exp_idx = MultiIndex.from_tuples( - [ - (dts[0], dts[0], "one"), - (dts[1], dts[1], "one"), - (dts[2], dts[2], "one"), - (dts[3], dts[3], "two"), - (dts[4], dts[4], "one"), - (dts[5], dts[5], "one"), - (dts[6], dts[6], "one"), - (dts[7], dts[7], "one"), - (dts[8], dts[8], "one"), - (dts[9], dts[9], "one"), - ], - names=["first", "first", "second"], - ) - - exp_values = [ - 0.18905338179353307, - -0.41306354339189344, - 1.799707382720902, - 0.7738065867276614, - 0.28121066979764925, - 0.9775674511260357, - -0.3288239040579627, - 0.45495807124085547, - 0.5452887139646817, - 0.12682784711186987, - ] - - expected = Series(exp_values, index=exp_idx) - tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3) - - -def test_nsmallest(): - a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) - b = Series(list("a" * 5 + "b" * 5)) - gb = a.groupby(b) - r = gb.nsmallest(3) - e = Series( - [1, 2, 3, 0, 4, 6], - index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]), - ) - tm.assert_series_equal(r, e) - - a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0]) - gb = a.groupby(b) - e = Series( - [0, 1, 1, 0, 1, 2], - index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]), - ) - tm.assert_series_equal(gb.nsmallest(3, keep="last"), e) - - -@pytest.mark.parametrize( - "data, groups", - [([0, 1, 2, 3], [0, 0, 1, 1]), ([0], [0])], -) -@pytest.mark.parametrize("dtype", [None, *tm.ALL_INT_NUMPY_DTYPES]) -@pytest.mark.parametrize("method", ["nlargest", "nsmallest"]) -def test_nlargest_and_smallest_noop(data, groups, dtype, method): - # GH 15272, GH 16345, GH 29129 - # Test nlargest/smallest when it results in a noop, - # i.e. 
input is sorted and group size <= n - if dtype is not None: - data = np.array(data, dtype=dtype) - if method == "nlargest": - data = list(reversed(data)) - ser = Series(data, name="a") - result = getattr(ser.groupby(groups), method)(n=2) - expidx = np.array(groups, dtype=int) if isinstance(groups, list) else groups - expected = Series(data, index=MultiIndex.from_arrays([expidx, ser.index]), name="a") - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize("func", ["cumprod", "cumsum"]) -def test_numpy_compat(func): - # see gh-12811 - df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]}) - g = df.groupby("A") - - msg = "numpy operations are not valid with groupby" - - with pytest.raises(UnsupportedFunctionCall, match=msg): - getattr(g, func)(1, 2, 3) - with pytest.raises(UnsupportedFunctionCall, match=msg): - getattr(g, func)(foo=1) - - -def test_cummin(dtypes_for_minmax): - dtype = dtypes_for_minmax[0] - min_val = dtypes_for_minmax[1] - - # GH 15048 - base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}) - expected_mins = [3, 3, 3, 2, 2, 2, 2, 1] - - df = base_df.astype(dtype) - - expected = DataFrame({"B": expected_mins}).astype(dtype) - result = df.groupby("A").cummin() - tm.assert_frame_equal(result, expected) - result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame() - tm.assert_frame_equal(result, expected) - - # Test w/ min value for dtype - df.loc[[2, 6], "B"] = min_val - df.loc[[1, 5], "B"] = min_val + 1 - expected.loc[[2, 3, 6, 7], "B"] = min_val - expected.loc[[1, 5], "B"] = min_val + 1 # should not be rounded to min_val - result = df.groupby("A").cummin() - tm.assert_frame_equal(result, expected, check_exact=True) - expected = ( - df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame() - ) - tm.assert_frame_equal(result, expected, check_exact=True) - - # Test nan in some values - # Explicit cast to float to avoid implicit cast when setting nan - base_df = base_df.astype({"B": "float"}) - base_df.loc[[0, 2, 4, 6], "B"] = np.nan - expected = DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]}) - result = base_df.groupby("A").cummin() - tm.assert_frame_equal(result, expected) - expected = ( - base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame() - ) - tm.assert_frame_equal(result, expected) - - # GH 15561 - df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])}) - expected = Series(pd.to_datetime("2001"), index=[0], name="b") - - result = df.groupby("a")["b"].cummin() - tm.assert_series_equal(expected, result) - - # GH 15635 - df = DataFrame({"a": [1, 2, 1], "b": [1, 2, 2]}) - result = df.groupby("a").b.cummin() - expected = Series([1, 2, 1], name="b") - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize("method", ["cummin", "cummax"]) -@pytest.mark.parametrize("dtype", ["UInt64", "Int64", "Float64", "float", "boolean"]) -def test_cummin_max_all_nan_column(method, dtype): - base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8}) - base_df["B"] = base_df["B"].astype(dtype) - grouped = base_df.groupby("A") - - expected = DataFrame({"B": [np.nan] * 8}, dtype=dtype) - result = getattr(grouped, method)() - tm.assert_frame_equal(expected, result) - - result = getattr(grouped["B"], method)().to_frame() - tm.assert_frame_equal(expected, result) - - -def test_cummax(dtypes_for_minmax): - dtype = dtypes_for_minmax[0] - max_val = dtypes_for_minmax[2] - - # GH 15048 - base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 
2, 2, 3, 2, 1]}) - expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3] - - df = base_df.astype(dtype) - - expected = DataFrame({"B": expected_maxs}).astype(dtype) - result = df.groupby("A").cummax() - tm.assert_frame_equal(result, expected) - result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame() - tm.assert_frame_equal(result, expected) - - # Test w/ max value for dtype - df.loc[[2, 6], "B"] = max_val - expected.loc[[2, 3, 6, 7], "B"] = max_val - result = df.groupby("A").cummax() - tm.assert_frame_equal(result, expected) - expected = ( - df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame() - ) - tm.assert_frame_equal(result, expected) - - # Test nan in some values - # Explicit cast to float to avoid implicit cast when setting nan - base_df = base_df.astype({"B": "float"}) - base_df.loc[[0, 2, 4, 6], "B"] = np.nan - expected = DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]}) - result = base_df.groupby("A").cummax() - tm.assert_frame_equal(result, expected) - expected = ( - base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame() - ) - tm.assert_frame_equal(result, expected) - - # GH 15561 - df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])}) - expected = Series(pd.to_datetime("2001"), index=[0], name="b") - - result = df.groupby("a")["b"].cummax() - tm.assert_series_equal(expected, result) - - # GH 15635 - df = DataFrame({"a": [1, 2, 1], "b": [2, 1, 1]}) - result = df.groupby("a").b.cummax() - expected = Series([2, 1, 2], name="b") - tm.assert_series_equal(result, expected) - - -def test_cummax_i8_at_implementation_bound(): - # the minimum value used to be treated as NPY_NAT+1 instead of NPY_NAT - # for int64 dtype GH#46382 - ser = Series([pd.NaT._value + n for n in range(5)]) - df = DataFrame({"A": 1, "B": ser, "C": ser.view("M8[ns]")}) - gb = df.groupby("A") - - res = gb.cummax() - exp = df[["B", "C"]] - tm.assert_frame_equal(res, exp) - - -@pytest.mark.parametrize("method", ["cummin", "cummax"]) -@pytest.mark.parametrize("dtype", ["float", "Int64", "Float64"]) -@pytest.mark.parametrize( - "groups,expected_data", - [ - ([1, 1, 1], [1, None, None]), - ([1, 2, 3], [1, None, 2]), - ([1, 3, 3], [1, None, None]), - ], -) -def test_cummin_max_skipna(method, dtype, groups, expected_data): - # GH-34047 - df = DataFrame({"a": Series([1, None, 2], dtype=dtype)}) - orig = df.copy() - gb = df.groupby(groups)["a"] - - result = getattr(gb, method)(skipna=False) - expected = Series(expected_data, dtype=dtype, name="a") - - # check we didn't accidentally alter df - tm.assert_frame_equal(df, orig) - - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize("method", ["cummin", "cummax"]) -def test_cummin_max_skipna_multiple_cols(method): - # Ensure missing value in "a" doesn't cause "b" to be nan-filled - df = DataFrame({"a": [np.nan, 2.0, 2.0], "b": [2.0, 2.0, 2.0]}) - gb = df.groupby([1, 1, 1])[["a", "b"]] - - result = getattr(gb, method)(skipna=False) - expected = DataFrame({"a": [np.nan, np.nan, np.nan], "b": [2.0, 2.0, 2.0]}) - - tm.assert_frame_equal(result, expected) - - -@td.skip_if_32bit -@pytest.mark.parametrize("method", ["cummin", "cummax"]) -@pytest.mark.parametrize( - "dtype,val", [("UInt64", np.iinfo("uint64").max), ("Int64", 2**53 + 1)] -) -def test_nullable_int_not_cast_as_float(method, dtype, val): - data = [val, pd.NA] - df = DataFrame({"grp": [1, 1], "b": data}, dtype=dtype) - grouped = df.groupby("grp") - - result = grouped.transform(method) - expected = DataFrame({"b": data}, 
dtype=dtype) - - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize( - "in_vals, out_vals", - [ - # Basics: strictly increasing (T), strictly decreasing (F), - # abs val increasing (F), non-strictly increasing (T) - ([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]), - # Test with inf vals - ( - [1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf], - [True, False, True, False], - ), - # Test with nan vals; should always be False - ( - [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan], - [False, False, False, False], - ), - ], -) -def test_is_monotonic_increasing(in_vals, out_vals): - # GH 17015 - source_dict = { - "A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"], - "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"], - "C": in_vals, - } - df = DataFrame(source_dict) - result = df.groupby("B").C.is_monotonic_increasing - index = Index(list("abcd"), name="B") - expected = Series(index=index, data=out_vals, name="C") - tm.assert_series_equal(result, expected) - - # Also check result equal to manually taking x.is_monotonic_increasing. - expected = df.groupby(["B"]).C.apply(lambda x: x.is_monotonic_increasing) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize( - "in_vals, out_vals", - [ - # Basics: strictly decreasing (T), strictly increasing (F), - # abs val decreasing (F), non-strictly increasing (T) - ([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]), - # Test with inf vals - ( - [np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf], - [True, True, False, True], - ), - # Test with nan vals; should always be False - ( - [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan], - [False, False, False, False], - ), - ], -) -def test_is_monotonic_decreasing(in_vals, out_vals): - # GH 17015 - source_dict = { - "A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"], - "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"], - "C": in_vals, - } - - df = DataFrame(source_dict) - result = df.groupby("B").C.is_monotonic_decreasing - index = Index(list("abcd"), name="B") - expected = Series(index=index, data=out_vals, name="C") - tm.assert_series_equal(result, expected) - - -# describe -# -------------------------------- - - -def test_apply_describe_bug(mframe): - grouped = mframe.groupby(level="first") - grouped.describe() # it works! 
- - -def test_series_describe_multikey(): - ts = tm.makeTimeSeries() - grouped = ts.groupby([lambda x: x.year, lambda x: x.month]) - result = grouped.describe() - tm.assert_series_equal(result["mean"], grouped.mean(), check_names=False) - tm.assert_series_equal(result["std"], grouped.std(), check_names=False) - tm.assert_series_equal(result["min"], grouped.min(), check_names=False) - - -def test_series_describe_single(): - ts = tm.makeTimeSeries() - grouped = ts.groupby(lambda x: x.month) - result = grouped.apply(lambda x: x.describe()) - expected = grouped.describe().stack(future_stack=True) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize("keys", ["key1", ["key1", "key2"]]) -def test_series_describe_as_index(as_index, keys): - # GH#49256 - df = DataFrame( - { - "key1": ["one", "two", "two", "three", "two"], - "key2": ["one", "two", "two", "three", "two"], - "foo2": [1, 2, 4, 4, 6], - } - ) - gb = df.groupby(keys, as_index=as_index)["foo2"] - result = gb.describe() - expected = DataFrame( - { - "key1": ["one", "three", "two"], - "count": [1.0, 1.0, 3.0], - "mean": [1.0, 4.0, 4.0], - "std": [np.nan, np.nan, 2.0], - "min": [1.0, 4.0, 2.0], - "25%": [1.0, 4.0, 3.0], - "50%": [1.0, 4.0, 4.0], - "75%": [1.0, 4.0, 5.0], - "max": [1.0, 4.0, 6.0], - } - ) - if len(keys) == 2: - expected.insert(1, "key2", expected["key1"]) - if as_index: - expected = expected.set_index(keys) - tm.assert_frame_equal(result, expected) - - def test_series_index_name(df): grouped = df.loc[:, ["C"]].groupby(df["A"]) result = grouped.agg(lambda x: x.mean()) assert result.index.name == "A" -def test_frame_describe_multikey(tsframe): - grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month]) - result = grouped.describe() - desc_groups = [] - for col in tsframe: - group = grouped[col].describe() - # GH 17464 - Remove duplicate MultiIndex levels - group_col = MultiIndex( - levels=[[col], group.columns], - codes=[[0] * len(group.columns), range(len(group.columns))], - ) - group = DataFrame(group.values, columns=group_col, index=group.index) - desc_groups.append(group) - expected = pd.concat(desc_groups, axis=1) - tm.assert_frame_equal(result, expected) - - msg = "DataFrame.groupby with axis=1 is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1) - result = groupedT.describe() - expected = tsframe.describe().T - # reverting the change from https://github.com/pandas-dev/pandas/pull/35441/ - expected.index = MultiIndex( - levels=[[0, 1], expected.index], - codes=[[0, 0, 1, 1], range(len(expected.index))], - ) - tm.assert_frame_equal(result, expected) - - -def test_frame_describe_tupleindex(): - # GH 14848 - regression from 0.19.0 to 0.19.1 - df1 = DataFrame( - { - "x": [1, 2, 3, 4, 5] * 3, - "y": [10, 20, 30, 40, 50] * 3, - "z": [100, 200, 300, 400, 500] * 3, - } - ) - df1["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5 - df2 = df1.rename(columns={"k": "key"}) - msg = "Names should be list-like for a MultiIndex" - with pytest.raises(ValueError, match=msg): - df1.groupby("k").describe() - with pytest.raises(ValueError, match=msg): - df2.groupby("key").describe() - - -def test_frame_describe_unstacked_format(): - # GH 4792 - prices = { - Timestamp("2011-01-06 10:59:05", tz=None): 24990, - Timestamp("2011-01-06 12:43:33", tz=None): 25499, - Timestamp("2011-01-06 12:54:09", tz=None): 25499, - } - volumes = { - Timestamp("2011-01-06 10:59:05", tz=None): 1500000000, - Timestamp("2011-01-06 12:43:33", tz=None): 
5000000000, - Timestamp("2011-01-06 12:54:09", tz=None): 100000000, - } - df = DataFrame({"PRICE": prices, "VOLUME": volumes}) - result = df.groupby("PRICE").VOLUME.describe() - data = [ - df[df.PRICE == 24990].VOLUME.describe().values.tolist(), - df[df.PRICE == 25499].VOLUME.describe().values.tolist(), - ] - expected = DataFrame( - data, - index=Index([24990, 25499], name="PRICE"), - columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], - ) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.filterwarnings( - "ignore:" - "indexing past lexsort depth may impact performance:" - "pandas.errors.PerformanceWarning" -) -@pytest.mark.parametrize("as_index", [True, False]) -@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]]) -def test_describe_with_duplicate_output_column_names(as_index, keys): - # GH 35314 - df = DataFrame( - { - "a1": [99, 99, 99, 88, 88, 88], - "a2": [99, 99, 99, 88, 88, 88], - "b": [1, 2, 3, 4, 5, 6], - "c": [10, 20, 30, 40, 50, 60], - }, - columns=["a1", "a2", "b", "b"], - copy=False, - ) - if keys == ["a1"]: - df = df.drop(columns="a2") - - expected = ( - DataFrame.from_records( - [ - ("b", "count", 3.0, 3.0), - ("b", "mean", 5.0, 2.0), - ("b", "std", 1.0, 1.0), - ("b", "min", 4.0, 1.0), - ("b", "25%", 4.5, 1.5), - ("b", "50%", 5.0, 2.0), - ("b", "75%", 5.5, 2.5), - ("b", "max", 6.0, 3.0), - ("b", "count", 3.0, 3.0), - ("b", "mean", 5.0, 2.0), - ("b", "std", 1.0, 1.0), - ("b", "min", 4.0, 1.0), - ("b", "25%", 4.5, 1.5), - ("b", "50%", 5.0, 2.0), - ("b", "75%", 5.5, 2.5), - ("b", "max", 6.0, 3.0), - ], - ) - .set_index([0, 1]) - .T - ) - expected.columns.names = [None, None] - if len(keys) == 2: - expected.index = MultiIndex( - levels=[[88, 99], [88, 99]], codes=[[0, 1], [0, 1]], names=["a1", "a2"] - ) - else: - expected.index = Index([88, 99], name="a1") - - if not as_index: - expected = expected.reset_index() - - result = df.groupby(keys, as_index=as_index).describe() - - tm.assert_frame_equal(result, expected) - - -def test_describe_duplicate_columns(): - # GH#50806 - df = DataFrame([[0, 1, 2, 3]]) - df.columns = [0, 1, 2, 0] - gb = df.groupby(df[1]) - result = gb.describe(percentiles=[]) - - columns = ["count", "mean", "std", "min", "50%", "max"] - frames = [ - DataFrame([[1.0, val, np.nan, val, val, val]], index=[1], columns=columns) - for val in (0.0, 2.0, 3.0) - ] - expected = pd.concat(frames, axis=1) - expected.columns = MultiIndex( - levels=[[0, 2], columns], - codes=[6 * [0] + 6 * [1] + 6 * [0], 3 * list(range(6))], - ) - expected.index.names = [1] - tm.assert_frame_equal(result, expected) - - -def test_groupby_mean_no_overflow(): - # Regression test for (#22487) - df = DataFrame( - { - "user": ["A", "A", "A", "A", "A"], - "connections": [4970, 4749, 4719, 4704, 18446744073699999744], - } - ) - assert df.groupby("user")["connections"].mean()["A"] == 3689348814740003840 - - @pytest.mark.parametrize( "values", [ @@ -1393,78 +565,6 @@ def test_apply_to_nullable_integer_returns_float(values, function): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("min_count", [0, 10]) -def test_groupby_sum_mincount_boolean(min_count): - b = True - a = False - na = np.nan - dfg = pd.array([b, b, na, na, a, a, b], dtype="boolean") - - df = DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": dfg}) - result = df.groupby("A").sum(min_count=min_count) - if min_count == 0: - expected = DataFrame( - {"B": pd.array([3, 0, 0], dtype="Int64")}, - index=Index([1, 2, 3], name="A"), - ) - tm.assert_frame_equal(result, expected) - else: - expected = 
DataFrame( - {"B": pd.array([pd.NA] * 3, dtype="Int64")}, - index=Index([1, 2, 3], name="A"), - ) - tm.assert_frame_equal(result, expected) - - -def test_groupby_sum_below_mincount_nullable_integer(): - # https://github.com/pandas-dev/pandas/issues/32861 - df = DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64") - grouped = df.groupby("a") - idx = Index([0, 1, 2], name="a", dtype="Int64") - - result = grouped["b"].sum(min_count=2) - expected = Series([pd.NA] * 3, dtype="Int64", index=idx, name="b") - tm.assert_series_equal(result, expected) - - result = grouped.sum(min_count=2) - expected = DataFrame({"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx) - tm.assert_frame_equal(result, expected) - - -def test_mean_on_timedelta(): - # GH 17382 - df = DataFrame({"time": pd.to_timedelta(range(10)), "cat": ["A", "B"] * 5}) - result = df.groupby("cat")["time"].mean() - expected = Series( - pd.to_timedelta([4, 5]), name="time", index=Index(["A", "B"], name="cat") - ) - tm.assert_series_equal(result, expected) - - -def test_groupby_sum_timedelta_with_nat(): - # GH#42659 - df = DataFrame( - { - "a": [1, 1, 2, 2], - "b": [pd.Timedelta("1d"), pd.Timedelta("2d"), pd.Timedelta("3d"), pd.NaT], - } - ) - td3 = pd.Timedelta(days=3) - - gb = df.groupby("a") - - res = gb.sum() - expected = DataFrame({"b": [td3, td3]}, index=Index([1, 2], name="a")) - tm.assert_frame_equal(res, expected) - - res = gb["b"].sum() - tm.assert_series_equal(res, expected["b"]) - - res = gb["b"].sum(min_count=2) - expected = Series([td3, pd.NaT], dtype="m8[ns]", name="b", index=expected.index) - tm.assert_series_equal(res, expected) - - @pytest.mark.parametrize( "kernel, has_arg", [ @@ -1706,22 +806,6 @@ def test_groupby_empty_dataset(dtype, kwargs): tm.assert_frame_equal(result, expected) -def test_corrwith_with_1_axis(): - # GH 47723 - df = DataFrame({"a": [1, 1, 2], "b": [3, 7, 4]}) - gb = df.groupby("a") - - msg = "DataFrameGroupBy.corrwith with axis=1 is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = gb.corrwith(df, axis=1) - index = Index( - data=[(1, 0), (1, 1), (1, 2), (2, 2), (2, 0), (2, 1)], - name=("a", None), - ) - expected = Series([np.nan] * 6, index=index) - tm.assert_series_equal(result, expected) - - def test_multiindex_group_all_columns_when_empty(groupby_func): # GH 32464 df = DataFrame({"a": [], "b": [], "c": []}).set_index(["a", "b", "c"]) diff --git a/pandas/tests/groupby/test_min_max.py b/pandas/tests/groupby/test_min_max.py deleted file mode 100644 index 30c7e1df1e691..0000000000000 --- a/pandas/tests/groupby/test_min_max.py +++ /dev/null @@ -1,272 +0,0 @@ -import numpy as np -import pytest - -from pandas._libs.tslibs import iNaT - -import pandas as pd -from pandas import ( - DataFrame, - Index, - Series, -) -import pandas._testing as tm - - -def test_max_min_non_numeric(): - # #2700 - aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]}) - - result = aa.groupby("nn").max() - assert "ss" in result - - result = aa.groupby("nn").max(numeric_only=False) - assert "ss" in result - - result = aa.groupby("nn").min() - assert "ss" in result - - result = aa.groupby("nn").min(numeric_only=False) - assert "ss" in result - - -def test_max_min_object_multiple_columns(using_array_manager): - # GH#41111 case where the aggregation is valid for some columns but not - # others; we split object blocks column-wise, consistent with - # DataFrame._reduce - - df = DataFrame( - { - "A": [1, 1, 2, 2, 3], - "B": [1, "foo", 2, "bar", 
False], - "C": ["a", "b", "c", "d", "e"], - } - ) - df._consolidate_inplace() # should already be consolidate, but double-check - if not using_array_manager: - assert len(df._mgr.blocks) == 2 - - gb = df.groupby("A") - - result = gb[["C"]].max() - # "max" is valid for column "C" but not for "B" - ei = Index([1, 2, 3], name="A") - expected = DataFrame({"C": ["b", "d", "e"]}, index=ei) - tm.assert_frame_equal(result, expected) - - result = gb[["C"]].min() - # "min" is valid for column "C" but not for "B" - ei = Index([1, 2, 3], name="A") - expected = DataFrame({"C": ["a", "c", "e"]}, index=ei) - tm.assert_frame_equal(result, expected) - - -def test_min_date_with_nans(): - # GH26321 - dates = pd.to_datetime( - Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d" - ).dt.date - df = DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates}) - - result = df.groupby("b", as_index=False)["c"].min()["c"] - expected = pd.to_datetime( - Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d" - ).dt.date - tm.assert_series_equal(result, expected) - - result = df.groupby("b")["c"].min() - expected.index.name = "b" - tm.assert_series_equal(result, expected) - - -def test_max_inat(): - # GH#40767 dont interpret iNaT as NaN - ser = Series([1, iNaT]) - key = np.array([1, 1], dtype=np.int64) - gb = ser.groupby(key) - - result = gb.max(min_count=2) - expected = Series({1: 1}, dtype=np.int64) - tm.assert_series_equal(result, expected, check_exact=True) - - result = gb.min(min_count=2) - expected = Series({1: iNaT}, dtype=np.int64) - tm.assert_series_equal(result, expected, check_exact=True) - - # not enough entries -> gets masked to NaN - result = gb.min(min_count=3) - expected = Series({1: np.nan}) - tm.assert_series_equal(result, expected, check_exact=True) - - -def test_max_inat_not_all_na(): - # GH#40767 dont interpret iNaT as NaN - - # make sure we dont round iNaT+1 to iNaT - ser = Series([1, iNaT, 2, iNaT + 1]) - gb = ser.groupby([1, 2, 3, 3]) - result = gb.min(min_count=2) - - # Note: in converting to float64, the iNaT + 1 maps to iNaT, i.e. 
is lossy - expected = Series({1: np.nan, 2: np.nan, 3: iNaT + 1}) - expected.index = expected.index.astype(int) - tm.assert_series_equal(result, expected, check_exact=True) - - -@pytest.mark.parametrize("func", ["min", "max"]) -def test_groupby_aggregate_period_column(func): - # GH 31471 - groups = [1, 2] - periods = pd.period_range("2020", periods=2, freq="Y") - df = DataFrame({"a": groups, "b": periods}) - - result = getattr(df.groupby("a")["b"], func)() - idx = Index([1, 2], name="a") - expected = Series(periods, index=idx, name="b") - - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize("func", ["min", "max"]) -def test_groupby_aggregate_period_frame(func): - # GH 31471 - groups = [1, 2] - periods = pd.period_range("2020", periods=2, freq="Y") - df = DataFrame({"a": groups, "b": periods}) - - result = getattr(df.groupby("a"), func)() - idx = Index([1, 2], name="a") - expected = DataFrame({"b": periods}, index=idx) - - tm.assert_frame_equal(result, expected) - - -def test_aggregate_numeric_object_dtype(): - # https://github.com/pandas-dev/pandas/issues/39329 - # simplified case: multiple object columns where one is all-NaN - # -> gets split as the all-NaN is inferred as float - df = DataFrame( - {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4}, - ).astype(object) - result = df.groupby("key").min() - expected = ( - DataFrame( - {"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]}, - ) - .set_index("key") - .astype(object) - ) - tm.assert_frame_equal(result, expected) - - # same but with numbers - df = DataFrame( - {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": range(4)}, - ).astype(object) - result = df.groupby("key").min() - expected = ( - DataFrame({"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]}) - .set_index("key") - .astype(object) - ) - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("func", ["min", "max"]) -def test_aggregate_categorical_lost_index(func: str): - # GH: 28641 groupby drops index, when grouping over categorical column with min/max - ds = Series(["b"], dtype="category").cat.as_ordered() - df = DataFrame({"A": [1997], "B": ds}) - result = df.groupby("A").agg({"B": func}) - expected = DataFrame({"B": ["b"]}, index=Index([1997], name="A")) - - # ordered categorical dtype should be preserved - expected["B"] = expected["B"].astype(ds.dtype) - - tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Float64", "Float32", "boolean"]) -def test_groupby_min_max_nullable(dtype): - if dtype == "Int64": - # GH#41743 avoid precision loss - ts = 1618556707013635762 - elif dtype == "boolean": - ts = 0 - else: - ts = 4.0 - - df = DataFrame({"id": [2, 2], "ts": [ts, ts + 1]}) - df["ts"] = df["ts"].astype(dtype) - - gb = df.groupby("id") - - result = gb.min() - expected = df.iloc[:1].set_index("id") - tm.assert_frame_equal(result, expected) - - res_max = gb.max() - expected_max = df.iloc[1:].set_index("id") - tm.assert_frame_equal(res_max, expected_max) - - result2 = gb.min(min_count=3) - expected2 = DataFrame({"ts": [pd.NA]}, index=expected.index, dtype=dtype) - tm.assert_frame_equal(result2, expected2) - - res_max2 = gb.max(min_count=3) - tm.assert_frame_equal(res_max2, expected2) - - # Case with NA values - df2 = DataFrame({"id": [2, 2, 2], "ts": [ts, pd.NA, ts + 1]}) - df2["ts"] = df2["ts"].astype(dtype) - gb2 = df2.groupby("id") - - result3 = gb2.min() - tm.assert_frame_equal(result3, expected) - - res_max3 = gb2.max() - 
tm.assert_frame_equal(res_max3, expected_max) - - result4 = gb2.min(min_count=100) - tm.assert_frame_equal(result4, expected2) - - res_max4 = gb2.max(min_count=100) - tm.assert_frame_equal(res_max4, expected2) - - -def test_min_max_nullable_uint64_empty_group(): - # don't raise NotImplementedError from libgroupby - cat = pd.Categorical([0] * 10, categories=[0, 1]) - df = DataFrame({"A": cat, "B": pd.array(np.arange(10, dtype=np.uint64))}) - gb = df.groupby("A", observed=False) - - res = gb.min() - - idx = pd.CategoricalIndex([0, 1], dtype=cat.dtype, name="A") - expected = DataFrame({"B": pd.array([0, pd.NA], dtype="UInt64")}, index=idx) - tm.assert_frame_equal(res, expected) - - res = gb.max() - expected.iloc[0, 0] = 9 - tm.assert_frame_equal(res, expected) - - -@pytest.mark.parametrize("func", ["first", "last", "min", "max"]) -def test_groupby_min_max_categorical(func): - # GH: 52151 - df = DataFrame( - { - "col1": pd.Categorical(["A"], categories=list("AB"), ordered=True), - "col2": pd.Categorical([1], categories=[1, 2], ordered=True), - "value": 0.1, - } - ) - result = getattr(df.groupby("col1", observed=False), func)() - - idx = pd.CategoricalIndex(data=["A", "B"], name="col1", ordered=True) - expected = DataFrame( - { - "col2": pd.Categorical([1, None], categories=[1, 2], ordered=True), - "value": [0.1, None], - }, - index=idx, - ) - tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_nunique.py b/pandas/tests/groupby/test_nunique.py deleted file mode 100644 index 9c9e32d9ce226..0000000000000 --- a/pandas/tests/groupby/test_nunique.py +++ /dev/null @@ -1,190 +0,0 @@ -import datetime as dt -from string import ascii_lowercase - -import numpy as np -import pytest - -import pandas as pd -from pandas import ( - DataFrame, - MultiIndex, - NaT, - Series, - Timestamp, - date_range, -) -import pandas._testing as tm - - -@pytest.mark.slow -@pytest.mark.parametrize("sort", [False, True]) -@pytest.mark.parametrize("dropna", [False, True]) -@pytest.mark.parametrize("as_index", [True, False]) -@pytest.mark.parametrize("with_nan", [True, False]) -@pytest.mark.parametrize("keys", [["joe"], ["joe", "jim"]]) -def test_series_groupby_nunique(sort, dropna, as_index, with_nan, keys): - n = 100 - m = 10 - days = date_range("2015-08-23", periods=10) - df = DataFrame( - { - "jim": np.random.default_rng(2).choice(list(ascii_lowercase), n), - "joe": np.random.default_rng(2).choice(days, n), - "julie": np.random.default_rng(2).integers(0, m, n), - } - ) - if with_nan: - df = df.astype({"julie": float}) # Explicit cast to avoid implicit cast below - df.loc[1::17, "jim"] = None - df.loc[3::37, "joe"] = None - df.loc[7::19, "julie"] = None - df.loc[8::19, "julie"] = None - df.loc[9::19, "julie"] = None - original_df = df.copy() - gr = df.groupby(keys, as_index=as_index, sort=sort) - left = gr["julie"].nunique(dropna=dropna) - - gr = df.groupby(keys, as_index=as_index, sort=sort) - right = gr["julie"].apply(Series.nunique, dropna=dropna) - if not as_index: - right = right.reset_index(drop=True) - - if as_index: - tm.assert_series_equal(left, right, check_names=False) - else: - tm.assert_frame_equal(left, right, check_names=False) - tm.assert_frame_equal(df, original_df) - - -def test_nunique(): - df = DataFrame({"A": list("abbacc"), "B": list("abxacc"), "C": list("abbacx")}) - - expected = DataFrame({"A": list("abc"), "B": [1, 2, 1], "C": [1, 1, 2]}) - result = df.groupby("A", as_index=False).nunique() - tm.assert_frame_equal(result, expected) - - # as_index - expected.index = list("abc") 
- expected.index.name = "A" - expected = expected.drop(columns="A") - result = df.groupby("A").nunique() - tm.assert_frame_equal(result, expected) - - # with na - result = df.replace({"x": None}).groupby("A").nunique(dropna=False) - tm.assert_frame_equal(result, expected) - - # dropna - expected = DataFrame({"B": [1] * 3, "C": [1] * 3}, index=list("abc")) - expected.index.name = "A" - result = df.replace({"x": None}).groupby("A").nunique() - tm.assert_frame_equal(result, expected) - - -def test_nunique_with_object(): - # GH 11077 - data = DataFrame( - [ - [100, 1, "Alice"], - [200, 2, "Bob"], - [300, 3, "Charlie"], - [-400, 4, "Dan"], - [500, 5, "Edith"], - ], - columns=["amount", "id", "name"], - ) - - result = data.groupby(["id", "amount"])["name"].nunique() - index = MultiIndex.from_arrays([data.id, data.amount]) - expected = Series([1] * 5, name="name", index=index) - tm.assert_series_equal(result, expected) - - -def test_nunique_with_empty_series(): - # GH 12553 - data = Series(name="name", dtype=object) - result = data.groupby(level=0).nunique() - expected = Series(name="name", dtype="int64") - tm.assert_series_equal(result, expected) - - -def test_nunique_with_timegrouper(): - # GH 13453 - test = DataFrame( - { - "time": [ - Timestamp("2016-06-28 09:35:35"), - Timestamp("2016-06-28 16:09:30"), - Timestamp("2016-06-28 16:46:28"), - ], - "data": ["1", "2", "3"], - } - ).set_index("time") - result = test.groupby(pd.Grouper(freq="h"))["data"].nunique() - expected = test.groupby(pd.Grouper(freq="h"))["data"].apply(Series.nunique) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize( - "key, data, dropna, expected", - [ - ( - ["x", "x", "x"], - [Timestamp("2019-01-01"), NaT, Timestamp("2019-01-01")], - True, - Series([1], index=pd.Index(["x"], name="key"), name="data"), - ), - ( - ["x", "x", "x"], - [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], - True, - Series([1], index=pd.Index(["x"], name="key"), name="data"), - ), - ( - ["x", "x", "x", "y", "y"], - [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], - False, - Series([2, 2], index=pd.Index(["x", "y"], name="key"), name="data"), - ), - ( - ["x", "x", "x", "x", "y"], - [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], - False, - Series([2, 1], index=pd.Index(["x", "y"], name="key"), name="data"), - ), - ], -) -def test_nunique_with_NaT(key, data, dropna, expected): - # GH 27951 - df = DataFrame({"key": key, "data": data}) - result = df.groupby(["key"])["data"].nunique(dropna=dropna) - tm.assert_series_equal(result, expected) - - -def test_nunique_preserves_column_level_names(): - # GH 23222 - test = DataFrame([1, 2, 2], columns=pd.Index(["A"], name="level_0")) - result = test.groupby([0, 0, 0]).nunique() - expected = DataFrame([2], index=np.array([0]), columns=test.columns) - tm.assert_frame_equal(result, expected) - - -def test_nunique_transform_with_datetime(): - # GH 35109 - transform with nunique on datetimes results in integers - df = DataFrame(date_range("2008-12-31", "2009-01-02"), columns=["date"]) - result = df.groupby([0, 0, 1])["date"].transform("nunique") - expected = Series([2, 2, 1], name="date") - tm.assert_series_equal(result, expected) - - -def test_empty_categorical(observed): - # GH#21334 - cat = Series([1]).astype("category") - ser = cat[:0] - gb = ser.groupby(ser, observed=observed) - result = gb.nunique() - if observed: - expected = Series([], index=cat[:0], dtype="int64") - else: - expected = Series([0], index=cat, dtype="int64") - 
tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_reductions.py b/pandas/tests/groupby/test_reductions.py new file mode 100644 index 0000000000000..fdfb211ac2269 --- /dev/null +++ b/pandas/tests/groupby/test_reductions.py @@ -0,0 +1,838 @@ +import builtins +import datetime as dt +from io import StringIO +from string import ascii_lowercase + +import numpy as np +import pytest + +from pandas._libs.tslibs import iNaT + +import pandas as pd +from pandas import ( + DataFrame, + MultiIndex, + Series, + Timestamp, + date_range, + isna, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("agg_func", ["any", "all"]) +@pytest.mark.parametrize( + "vals", + [ + ["foo", "bar", "baz"], + ["foo", "", ""], + ["", "", ""], + [1, 2, 3], + [1, 0, 0], + [0, 0, 0], + [1.0, 2.0, 3.0], + [1.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [True, True, True], + [True, False, False], + [False, False, False], + [np.nan, np.nan, np.nan], + ], +) +def test_groupby_bool_aggs(skipna, agg_func, vals): + df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2}) + + # Figure out expectation using Python builtin + exp = getattr(builtins, agg_func)(vals) + + # edge case for missing data with skipna and 'any' + if skipna and all(isna(vals)) and agg_func == "any": + exp = False + + expected = DataFrame( + [exp] * 2, columns=["val"], index=pd.Index(["a", "b"], name="key") + ) + result = getattr(df.groupby("key"), agg_func)(skipna=skipna) + tm.assert_frame_equal(result, expected) + + +def test_any(): + df = DataFrame( + [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], + columns=["A", "B", "C"], + ) + expected = DataFrame( + [[True, True], [False, True]], columns=["B", "C"], index=[1, 3] + ) + expected.index.name = "A" + result = df.groupby("A").any() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) +def test_bool_aggs_dup_column_labels(bool_agg_func): + # GH#21668 + df = DataFrame([[True, True]], columns=["a", "a"]) + grp_by = df.groupby([0]) + result = getattr(grp_by, bool_agg_func)() + + expected = df.set_axis(np.array([0])) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) +@pytest.mark.parametrize( + "data", + [ + [False, False, False], + [True, True, True], + [pd.NA, pd.NA, pd.NA], + [False, pd.NA, False], + [True, pd.NA, True], + [True, pd.NA, False], + ], +) +def test_masked_kleene_logic(bool_agg_func, skipna, data): + # GH#37506 + ser = Series(data, dtype="boolean") + + # The result should match aggregating on the whole series. 
Correctness + # there is verified in test_reductions.py::test_any_all_boolean_kleene_logic + expected_data = getattr(ser, bool_agg_func)(skipna=skipna) + expected = Series(expected_data, index=np.array([0]), dtype="boolean") + + result = ser.groupby([0, 0, 0]).agg(bool_agg_func, skipna=skipna) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype1,dtype2,exp_col1,exp_col2", + [ + ( + "float", + "Float64", + np.array([True], dtype=bool), + pd.array([pd.NA], dtype="boolean"), + ), + ( + "Int64", + "float", + pd.array([pd.NA], dtype="boolean"), + np.array([True], dtype=bool), + ), + ( + "Int64", + "Int64", + pd.array([pd.NA], dtype="boolean"), + pd.array([pd.NA], dtype="boolean"), + ), + ( + "Float64", + "boolean", + pd.array([pd.NA], dtype="boolean"), + pd.array([pd.NA], dtype="boolean"), + ), + ], +) +def test_masked_mixed_types(dtype1, dtype2, exp_col1, exp_col2): + # GH#37506 + data = [1.0, np.nan] + df = DataFrame( + {"col1": pd.array(data, dtype=dtype1), "col2": pd.array(data, dtype=dtype2)} + ) + result = df.groupby([1, 1]).agg("all", skipna=False) + + expected = DataFrame({"col1": exp_col1, "col2": exp_col2}, index=np.array([1])) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) +@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"]) +def test_masked_bool_aggs_skipna(bool_agg_func, dtype, skipna, frame_or_series): + # GH#40585 + obj = frame_or_series([pd.NA, 1], dtype=dtype) + expected_res = True + if not skipna and bool_agg_func == "all": + expected_res = pd.NA + expected = frame_or_series([expected_res], index=np.array([1]), dtype="boolean") + + result = obj.groupby([1, 1]).agg(bool_agg_func, skipna=skipna) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "bool_agg_func,data,expected_res", + [ + ("any", [pd.NA, np.nan], False), + ("any", [pd.NA, 1, np.nan], True), + ("all", [pd.NA, pd.NaT], True), + ("all", [pd.NA, False, pd.NaT], False), + ], +) +def test_object_type_missing_vals(bool_agg_func, data, expected_res, frame_or_series): + # GH#37501 + obj = frame_or_series(data, dtype=object) + result = obj.groupby([1] * len(data)).agg(bool_agg_func) + expected = frame_or_series([expected_res], index=np.array([1]), dtype="bool") + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) +def test_object_NA_raises_with_skipna_false(bool_agg_func): + # GH#37501 + ser = Series([pd.NA], dtype=object) + with pytest.raises(TypeError, match="boolean value of NA is ambiguous"): + ser.groupby([1]).agg(bool_agg_func, skipna=False) + + +@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) +def test_empty(frame_or_series, bool_agg_func): + # GH 45231 + kwargs = {"columns": ["a"]} if frame_or_series is DataFrame else {"name": "a"} + obj = frame_or_series(**kwargs, dtype=object) + result = getattr(obj.groupby(obj.index), bool_agg_func)() + expected = frame_or_series(**kwargs, dtype=bool) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "func, values", + [ + ("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}), + ("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}), + ], +) +@pytest.mark.parametrize("numeric_only", [True, False]) +def test_idxmin_idxmax_returns_int_types(func, values, numeric_only): + # GH 25444 + df = DataFrame( + { + "name": ["A", "A", "B", "B"], + "c_int": [1, 2, 3, 4], + "c_float": [4.02, 3.03, 2.04, 1.05], + "c_date": ["2019", "2018", "2016", "2017"], + } + ) + df["c_date"] 
= pd.to_datetime(df["c_date"]) + df["c_date_tz"] = df["c_date"].dt.tz_localize("US/Pacific") + df["c_timedelta"] = df["c_date"] - df["c_date"].iloc[0] + df["c_period"] = df["c_date"].dt.to_period("W") + df["c_Integer"] = df["c_int"].astype("Int64") + df["c_Floating"] = df["c_float"].astype("Float64") + + result = getattr(df.groupby("name"), func)(numeric_only=numeric_only) + + expected = DataFrame(values, index=pd.Index(["A", "B"], name="name")) + if numeric_only: + expected = expected.drop(columns=["c_date"]) + else: + expected["c_date_tz"] = expected["c_date"] + expected["c_timedelta"] = expected["c_date"] + expected["c_period"] = expected["c_date"] + expected["c_Integer"] = expected["c_int"] + expected["c_Floating"] = expected["c_float"] + + tm.assert_frame_equal(result, expected) + + +def test_idxmin_idxmax_axis1(): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), columns=["A", "B", "C", "D"] + ) + df["A"] = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4] + + gb = df.groupby("A") + + warn_msg = "DataFrameGroupBy.idxmax with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = gb.idxmax(axis=1) + + alt = df.iloc[:, 1:].idxmax(axis=1) + indexer = res.index.get_level_values(1) + + tm.assert_series_equal(alt[indexer], res.droplevel("A")) + + df["E"] = date_range("2016-01-01", periods=10) + gb2 = df.groupby("A") + + msg = "'>' not supported between instances of 'Timestamp' and 'float'" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + gb2.idxmax(axis=1) + + +def test_groupby_mean_no_overflow(): + # Regression test for (#22487) + df = DataFrame( + { + "user": ["A", "A", "A", "A", "A"], + "connections": [4970, 4749, 4719, 4704, 18446744073699999744], + } + ) + assert df.groupby("user")["connections"].mean()["A"] == 3689348814740003840 + + +def test_mean_on_timedelta(): + # GH 17382 + df = DataFrame({"time": pd.to_timedelta(range(10)), "cat": ["A", "B"] * 5}) + result = df.groupby("cat")["time"].mean() + expected = Series( + pd.to_timedelta([4, 5]), name="time", index=pd.Index(["A", "B"], name="cat") + ) + tm.assert_series_equal(result, expected) + + +def test_cython_median(): + arr = np.random.default_rng(2).standard_normal(1000) + arr[::2] = np.nan + df = DataFrame(arr) + + labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float) + labels[::17] = np.nan + + result = df.groupby(labels).median() + msg = "using DataFrameGroupBy.median" + with tm.assert_produces_warning(FutureWarning, match=msg): + exp = df.groupby(labels).agg(np.nanmedian) + tm.assert_frame_equal(result, exp) + + df = DataFrame(np.random.default_rng(2).standard_normal((1000, 5))) + msg = "using DataFrameGroupBy.median" + with tm.assert_produces_warning(FutureWarning, match=msg): + rs = df.groupby(labels).agg(np.median) + xp = df.groupby(labels).median() + tm.assert_frame_equal(rs, xp) + + +def test_median_empty_bins(observed): + df = DataFrame(np.random.default_rng(2).integers(0, 44, 500)) + + grps = range(0, 55, 5) + bins = pd.cut(df[0], grps) + + result = df.groupby(bins, observed=observed).median() + expected = df.groupby(bins, observed=observed).agg(lambda x: x.median()) + tm.assert_frame_equal(result, expected) + + +def test_max_min_non_numeric(): + # #2700 + aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]}) + + result = aa.groupby("nn").max() + assert "ss" in result + + result = aa.groupby("nn").max(numeric_only=False) + assert "ss" in result + + result = 
aa.groupby("nn").min() + assert "ss" in result + + result = aa.groupby("nn").min(numeric_only=False) + assert "ss" in result + + +def test_max_min_object_multiple_columns(using_array_manager): + # GH#41111 case where the aggregation is valid for some columns but not + # others; we split object blocks column-wise, consistent with + # DataFrame._reduce + + df = DataFrame( + { + "A": [1, 1, 2, 2, 3], + "B": [1, "foo", 2, "bar", False], + "C": ["a", "b", "c", "d", "e"], + } + ) + df._consolidate_inplace() # should already be consolidate, but double-check + if not using_array_manager: + assert len(df._mgr.blocks) == 2 + + gb = df.groupby("A") + + result = gb[["C"]].max() + # "max" is valid for column "C" but not for "B" + ei = pd.Index([1, 2, 3], name="A") + expected = DataFrame({"C": ["b", "d", "e"]}, index=ei) + tm.assert_frame_equal(result, expected) + + result = gb[["C"]].min() + # "min" is valid for column "C" but not for "B" + ei = pd.Index([1, 2, 3], name="A") + expected = DataFrame({"C": ["a", "c", "e"]}, index=ei) + tm.assert_frame_equal(result, expected) + + +def test_min_date_with_nans(): + # GH26321 + dates = pd.to_datetime( + Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d" + ).dt.date + df = DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates}) + + result = df.groupby("b", as_index=False)["c"].min()["c"] + expected = pd.to_datetime( + Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d" + ).dt.date + tm.assert_series_equal(result, expected) + + result = df.groupby("b")["c"].min() + expected.index.name = "b" + tm.assert_series_equal(result, expected) + + +def test_max_inat(): + # GH#40767 dont interpret iNaT as NaN + ser = Series([1, iNaT]) + key = np.array([1, 1], dtype=np.int64) + gb = ser.groupby(key) + + result = gb.max(min_count=2) + expected = Series({1: 1}, dtype=np.int64) + tm.assert_series_equal(result, expected, check_exact=True) + + result = gb.min(min_count=2) + expected = Series({1: iNaT}, dtype=np.int64) + tm.assert_series_equal(result, expected, check_exact=True) + + # not enough entries -> gets masked to NaN + result = gb.min(min_count=3) + expected = Series({1: np.nan}) + tm.assert_series_equal(result, expected, check_exact=True) + + +def test_max_inat_not_all_na(): + # GH#40767 dont interpret iNaT as NaN + + # make sure we dont round iNaT+1 to iNaT + ser = Series([1, iNaT, 2, iNaT + 1]) + gb = ser.groupby([1, 2, 3, 3]) + result = gb.min(min_count=2) + + # Note: in converting to float64, the iNaT + 1 maps to iNaT, i.e. 
is lossy + expected = Series({1: np.nan, 2: np.nan, 3: iNaT + 1}) + expected.index = expected.index.astype(int) + tm.assert_series_equal(result, expected, check_exact=True) + + +@pytest.mark.parametrize("func", ["min", "max"]) +def test_groupby_aggregate_period_column(func): + # GH 31471 + groups = [1, 2] + periods = pd.period_range("2020", periods=2, freq="Y") + df = DataFrame({"a": groups, "b": periods}) + + result = getattr(df.groupby("a")["b"], func)() + idx = pd.Index([1, 2], name="a") + expected = Series(periods, index=idx, name="b") + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("func", ["min", "max"]) +def test_groupby_aggregate_period_frame(func): + # GH 31471 + groups = [1, 2] + periods = pd.period_range("2020", periods=2, freq="Y") + df = DataFrame({"a": groups, "b": periods}) + + result = getattr(df.groupby("a"), func)() + idx = pd.Index([1, 2], name="a") + expected = DataFrame({"b": periods}, index=idx) + + tm.assert_frame_equal(result, expected) + + +def test_aggregate_numeric_object_dtype(): + # https://github.com/pandas-dev/pandas/issues/39329 + # simplified case: multiple object columns where one is all-NaN + # -> gets split as the all-NaN is inferred as float + df = DataFrame( + {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4}, + ).astype(object) + result = df.groupby("key").min() + expected = ( + DataFrame( + {"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]}, + ) + .set_index("key") + .astype(object) + ) + tm.assert_frame_equal(result, expected) + + # same but with numbers + df = DataFrame( + {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": range(4)}, + ).astype(object) + result = df.groupby("key").min() + expected = ( + DataFrame({"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]}) + .set_index("key") + .astype(object) + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("func", ["min", "max"]) +def test_aggregate_categorical_lost_index(func: str): + # GH: 28641 groupby drops index, when grouping over categorical column with min/max + ds = Series(["b"], dtype="category").cat.as_ordered() + df = DataFrame({"A": [1997], "B": ds}) + result = df.groupby("A").agg({"B": func}) + expected = DataFrame({"B": ["b"]}, index=pd.Index([1997], name="A")) + + # ordered categorical dtype should be preserved + expected["B"] = expected["B"].astype(ds.dtype) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Float64", "Float32", "boolean"]) +def test_groupby_min_max_nullable(dtype): + if dtype == "Int64": + # GH#41743 avoid precision loss + ts = 1618556707013635762 + elif dtype == "boolean": + ts = 0 + else: + ts = 4.0 + + df = DataFrame({"id": [2, 2], "ts": [ts, ts + 1]}) + df["ts"] = df["ts"].astype(dtype) + + gb = df.groupby("id") + + result = gb.min() + expected = df.iloc[:1].set_index("id") + tm.assert_frame_equal(result, expected) + + res_max = gb.max() + expected_max = df.iloc[1:].set_index("id") + tm.assert_frame_equal(res_max, expected_max) + + result2 = gb.min(min_count=3) + expected2 = DataFrame({"ts": [pd.NA]}, index=expected.index, dtype=dtype) + tm.assert_frame_equal(result2, expected2) + + res_max2 = gb.max(min_count=3) + tm.assert_frame_equal(res_max2, expected2) + + # Case with NA values + df2 = DataFrame({"id": [2, 2, 2], "ts": [ts, pd.NA, ts + 1]}) + df2["ts"] = df2["ts"].astype(dtype) + gb2 = df2.groupby("id") + + result3 = gb2.min() + tm.assert_frame_equal(result3, expected) + + res_max3 = gb2.max() + 
tm.assert_frame_equal(res_max3, expected_max) + + result4 = gb2.min(min_count=100) + tm.assert_frame_equal(result4, expected2) + + res_max4 = gb2.max(min_count=100) + tm.assert_frame_equal(res_max4, expected2) + + +def test_min_max_nullable_uint64_empty_group(): + # don't raise NotImplementedError from libgroupby + cat = pd.Categorical([0] * 10, categories=[0, 1]) + df = DataFrame({"A": cat, "B": pd.array(np.arange(10, dtype=np.uint64))}) + gb = df.groupby("A", observed=False) + + res = gb.min() + + idx = pd.CategoricalIndex([0, 1], dtype=cat.dtype, name="A") + expected = DataFrame({"B": pd.array([0, pd.NA], dtype="UInt64")}, index=idx) + tm.assert_frame_equal(res, expected) + + res = gb.max() + expected.iloc[0, 0] = 9 + tm.assert_frame_equal(res, expected) + + +@pytest.mark.parametrize("func", ["first", "last", "min", "max"]) +def test_groupby_min_max_categorical(func): + # GH: 52151 + df = DataFrame( + { + "col1": pd.Categorical(["A"], categories=list("AB"), ordered=True), + "col2": pd.Categorical([1], categories=[1, 2], ordered=True), + "value": 0.1, + } + ) + result = getattr(df.groupby("col1", observed=False), func)() + + idx = pd.CategoricalIndex(data=["A", "B"], name="col1", ordered=True) + expected = DataFrame( + { + "col2": pd.Categorical([1, None], categories=[1, 2], ordered=True), + "value": [0.1, None], + }, + index=idx, + ) + tm.assert_frame_equal(result, expected) + + +def test_max_nan_bug(): + raw = """,Date,app,File +-04-23,2013-04-23 00:00:00,,log080001.log +-05-06,2013-05-06 00:00:00,,log.log +-05-07,2013-05-07 00:00:00,OE,xlsx""" + + with tm.assert_produces_warning(UserWarning, match="Could not infer format"): + df = pd.read_csv(StringIO(raw), parse_dates=[0]) + gb = df.groupby("Date") + r = gb[["File"]].max() + e = gb["File"].max().to_frame() + tm.assert_frame_equal(r, e) + assert not r["File"].isna().any() + + +@pytest.mark.slow +@pytest.mark.parametrize("sort", [False, True]) +@pytest.mark.parametrize("dropna", [False, True]) +@pytest.mark.parametrize("as_index", [True, False]) +@pytest.mark.parametrize("with_nan", [True, False]) +@pytest.mark.parametrize("keys", [["joe"], ["joe", "jim"]]) +def test_series_groupby_nunique(sort, dropna, as_index, with_nan, keys): + n = 100 + m = 10 + days = date_range("2015-08-23", periods=10) + df = DataFrame( + { + "jim": np.random.default_rng(2).choice(list(ascii_lowercase), n), + "joe": np.random.default_rng(2).choice(days, n), + "julie": np.random.default_rng(2).integers(0, m, n), + } + ) + if with_nan: + df = df.astype({"julie": float}) # Explicit cast to avoid implicit cast below + df.loc[1::17, "jim"] = None + df.loc[3::37, "joe"] = None + df.loc[7::19, "julie"] = None + df.loc[8::19, "julie"] = None + df.loc[9::19, "julie"] = None + original_df = df.copy() + gr = df.groupby(keys, as_index=as_index, sort=sort) + left = gr["julie"].nunique(dropna=dropna) + + gr = df.groupby(keys, as_index=as_index, sort=sort) + right = gr["julie"].apply(Series.nunique, dropna=dropna) + if not as_index: + right = right.reset_index(drop=True) + + if as_index: + tm.assert_series_equal(left, right, check_names=False) + else: + tm.assert_frame_equal(left, right, check_names=False) + tm.assert_frame_equal(df, original_df) + + +def test_nunique(): + df = DataFrame({"A": list("abbacc"), "B": list("abxacc"), "C": list("abbacx")}) + + expected = DataFrame({"A": list("abc"), "B": [1, 2, 1], "C": [1, 1, 2]}) + result = df.groupby("A", as_index=False).nunique() + tm.assert_frame_equal(result, expected) + + # as_index + expected.index = list("abc") + 
expected.index.name = "A" + expected = expected.drop(columns="A") + result = df.groupby("A").nunique() + tm.assert_frame_equal(result, expected) + + # with na + result = df.replace({"x": None}).groupby("A").nunique(dropna=False) + tm.assert_frame_equal(result, expected) + + # dropna + expected = DataFrame({"B": [1] * 3, "C": [1] * 3}, index=list("abc")) + expected.index.name = "A" + result = df.replace({"x": None}).groupby("A").nunique() + tm.assert_frame_equal(result, expected) + + +def test_nunique_with_object(): + # GH 11077 + data = DataFrame( + [ + [100, 1, "Alice"], + [200, 2, "Bob"], + [300, 3, "Charlie"], + [-400, 4, "Dan"], + [500, 5, "Edith"], + ], + columns=["amount", "id", "name"], + ) + + result = data.groupby(["id", "amount"])["name"].nunique() + index = MultiIndex.from_arrays([data.id, data.amount]) + expected = Series([1] * 5, name="name", index=index) + tm.assert_series_equal(result, expected) + + +def test_nunique_with_empty_series(): + # GH 12553 + data = Series(name="name", dtype=object) + result = data.groupby(level=0).nunique() + expected = Series(name="name", dtype="int64") + tm.assert_series_equal(result, expected) + + +def test_nunique_with_timegrouper(): + # GH 13453 + test = DataFrame( + { + "time": [ + Timestamp("2016-06-28 09:35:35"), + Timestamp("2016-06-28 16:09:30"), + Timestamp("2016-06-28 16:46:28"), + ], + "data": ["1", "2", "3"], + } + ).set_index("time") + result = test.groupby(pd.Grouper(freq="h"))["data"].nunique() + expected = test.groupby(pd.Grouper(freq="h"))["data"].apply(Series.nunique) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "key, data, dropna, expected", + [ + ( + ["x", "x", "x"], + [Timestamp("2019-01-01"), pd.NaT, Timestamp("2019-01-01")], + True, + Series([1], index=pd.Index(["x"], name="key"), name="data"), + ), + ( + ["x", "x", "x"], + [dt.date(2019, 1, 1), pd.NaT, dt.date(2019, 1, 1)], + True, + Series([1], index=pd.Index(["x"], name="key"), name="data"), + ), + ( + ["x", "x", "x", "y", "y"], + [ + dt.date(2019, 1, 1), + pd.NaT, + dt.date(2019, 1, 1), + pd.NaT, + dt.date(2019, 1, 1), + ], + False, + Series([2, 2], index=pd.Index(["x", "y"], name="key"), name="data"), + ), + ( + ["x", "x", "x", "x", "y"], + [ + dt.date(2019, 1, 1), + pd.NaT, + dt.date(2019, 1, 1), + pd.NaT, + dt.date(2019, 1, 1), + ], + False, + Series([2, 1], index=pd.Index(["x", "y"], name="key"), name="data"), + ), + ], +) +def test_nunique_with_NaT(key, data, dropna, expected): + # GH 27951 + df = DataFrame({"key": key, "data": data}) + result = df.groupby(["key"])["data"].nunique(dropna=dropna) + tm.assert_series_equal(result, expected) + + +def test_nunique_preserves_column_level_names(): + # GH 23222 + test = DataFrame([1, 2, 2], columns=pd.Index(["A"], name="level_0")) + result = test.groupby([0, 0, 0]).nunique() + expected = DataFrame([2], index=np.array([0]), columns=test.columns) + tm.assert_frame_equal(result, expected) + + +def test_nunique_transform_with_datetime(): + # GH 35109 - transform with nunique on datetimes results in integers + df = DataFrame(date_range("2008-12-31", "2009-01-02"), columns=["date"]) + result = df.groupby([0, 0, 1])["date"].transform("nunique") + expected = Series([2, 2, 1], name="date") + tm.assert_series_equal(result, expected) + + +def test_empty_categorical(observed): + # GH#21334 + cat = Series([1]).astype("category") + ser = cat[:0] + gb = ser.groupby(ser, observed=observed) + result = gb.nunique() + if observed: + expected = Series([], index=cat[:0], dtype="int64") + else: + expected = 
Series([0], index=cat, dtype="int64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("min_count", [0, 10]) +def test_groupby_sum_mincount_boolean(min_count): + b = True + a = False + na = np.nan + dfg = pd.array([b, b, na, na, a, a, b], dtype="boolean") + + df = DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": dfg}) + result = df.groupby("A").sum(min_count=min_count) + if min_count == 0: + expected = DataFrame( + {"B": pd.array([3, 0, 0], dtype="Int64")}, + index=pd.Index([1, 2, 3], name="A"), + ) + tm.assert_frame_equal(result, expected) + else: + expected = DataFrame( + {"B": pd.array([pd.NA] * 3, dtype="Int64")}, + index=pd.Index([1, 2, 3], name="A"), + ) + tm.assert_frame_equal(result, expected) + + +def test_groupby_sum_below_mincount_nullable_integer(): + # https://github.com/pandas-dev/pandas/issues/32861 + df = DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64") + grouped = df.groupby("a") + idx = pd.Index([0, 1, 2], name="a", dtype="Int64") + + result = grouped["b"].sum(min_count=2) + expected = Series([pd.NA] * 3, dtype="Int64", index=idx, name="b") + tm.assert_series_equal(result, expected) + + result = grouped.sum(min_count=2) + expected = DataFrame({"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx) + tm.assert_frame_equal(result, expected) + + +def test_groupby_sum_timedelta_with_nat(): + # GH#42659 + df = DataFrame( + { + "a": [1, 1, 2, 2], + "b": [pd.Timedelta("1d"), pd.Timedelta("2d"), pd.Timedelta("3d"), pd.NaT], + } + ) + td3 = pd.Timedelta(days=3) + + gb = df.groupby("a") + + res = gb.sum() + expected = DataFrame({"b": [td3, td3]}, index=pd.Index([1, 2], name="a")) + tm.assert_frame_equal(res, expected) + + res = gb["b"].sum() + tm.assert_series_equal(res, expected["b"]) + + res = gb["b"].sum(min_count=2) + expected = Series([td3, pd.NaT], dtype="m8[ns]", name="b", index=expected.index) + tm.assert_series_equal(res, expected)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

Is this a bad idea? Just moving around code without much value? It has a similar structure to `tests.frame.methods`. The end goal is to have `test_function.py` completely removed.
https://api.github.com/repos/pandas-dev/pandas/pulls/55312
2023-09-27T22:06:45Z
2023-10-12T22:31:34Z
2023-10-12T22:31:34Z
2023-10-13T02:14:01Z
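The tests moved into `test_reductions.py` above all exercise grouped reductions, in particular over nullable data. A minimal runnable sketch of those semantics follows; the frame and column names are purely illustrative and not taken from the PR:

```python
import pandas as pd

# Toy frame with a masked integer column, mirroring the kind of
# grouped reductions the relocated tests exercise.
df = pd.DataFrame(
    {
        "key": ["a", "a", "b", "b"],
        "val": pd.array([1, pd.NA, 3, 4], dtype="Int64"),
    }
)
gb = df.groupby("key")["val"]

print(gb.min())                  # NA skipped by default: a -> 1, b -> 3
print(gb.sum(min_count=2))       # group "a" has one non-NA value -> <NA>
print(gb.nunique(dropna=False))  # NA counts as distinct: a -> 2, b -> 2
```

With a recent pandas, the masked ``Int64`` dtype is what makes ``min_count`` produce ``<NA>`` rather than ``NaN`` for the under-populated group, matching the behavior pinned down by `test_groupby_sum_below_mincount_nullable_integer` above.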
doc/source/whatsnew/v0.23.1.rst modified
diff --git a/doc/source/whatsnew/v0.23.1.rst b/doc/source/whatsnew/v0.23.1.rst
index b51368c87f991..79d3b8109cf29 100644
--- a/doc/source/whatsnew/v0.23.1.rst
+++ b/doc/source/whatsnew/v0.23.1.rst
@@ -26,7 +26,7 @@ Fixed regressions
 
 **Comparing Series with datetime.date**
 
 We've reverted a 0.23.0 change to comparing a :class:`Series` holding datetimes and a ``datetime.date`` object (:issue:`21152`).
-In pandas 0.22 and earlier, comparing a Series holding datetimes and ``datetime.date`` objects would coerce the ``datetime.date`` to a datetime before comparing.
+In pandas 0.22 and earlier, comparing a series holding datetimes and ``datetime.date`` objects would coerce the ``datetime.date`` to a datetime before comparing.
 This was inconsistent with Python, NumPy, and :class:`DatetimeIndex`, which never consider a datetime and ``datetime.date`` equal.
 In 0.23.0, we unified operations between DatetimeIndex and Series, and in the process changed comparisons between a Series of datetimes and ``datetime.date`` without warning.
- [x] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55310
2023-09-27T20:28:01Z
2023-09-27T21:31:46Z
null
2023-09-27T21:31:47Z
DOC move resample bug whatsnew notes to 2.2
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst
index 87b71eac76d1a..ddd1f95c56aea 100644
--- a/doc/source/whatsnew/v2.1.2.rst
+++ b/doc/source/whatsnew/v2.1.2.rst
@@ -13,7 +13,6 @@ including other versions of pandas.
 
 Fixed regressions
 ~~~~~~~~~~~~~~~~~
-- Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.MonthBegin` (:issue:`55271`)
 - Fixed bug where PDEP-6 warning about setting an item of an incompatible dtype was being shown when creating a new conditional column (:issue:`55025`)
 - Fixed regression in :meth:`DataFrame.join` where result has missing values and dtype is arrow backed string (:issue:`55348`)
 
@@ -26,8 +25,6 @@ Bug fixes
 - Fixed bug in :meth:`DataFrame.__setitem__` not inferring string dtype for zero-dimensional array with ``infer_string=True`` (:issue:`55366`)
 - Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`)
 - Fixed bug in :meth:`DataFrame.interpolate` raising incorrect error message (:issue:`55347`)
-- Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`)
-- Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`)
 - Fixed bug in :meth:`Index.insert` raising when inserting ``None`` into :class:`Index` with ``dtype="string[pyarrow_numpy]"`` (:issue:`55365`)
 - Fixed bug in :meth:`Series.all` and :meth:`Series.any` not treating missing values correctly for ``dtype="string[pyarrow_numpy]"`` (:issue:`55367`)
 - Fixed bug in :meth:`Series.rank` for ``string[pyarrow_numpy]`` dtype (:issue:`55362`)
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index 4749ceec4a330..01740182ca99e 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -283,6 +283,7 @@ Bug fixes
 - Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`)
 - Bug in :meth:`pandas.DataFrame.melt` where it would not preserve the datetime (:issue:`55254`)
 - Bug in :meth:`pandas.read_excel` with a ODS file without cached formatted cell for float values (:issue:`55219`)
+-
 
 Categorical
 ^^^^^^^^^^^
@@ -362,7 +363,9 @@ Plotting
 
 Groupby/resample/rolling
 ^^^^^^^^^^^^^^^^^^^^^^^^
--
+- Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`)
+- Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`)
+- Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.MonthBegin` (:issue:`55271`)
 -
 
 Reshaping
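The ``closed`` and ``label`` keywords mentioned in these whatsnew entries control, respectively, which edge of each resampling bin is inclusive and which edge is used to label the bin. A rough sketch of the knobs involved (index and values are invented for illustration; the exact bin edges for business-day offsets are what the fixes above correct):

```python
import pandas as pd

idx = pd.date_range("2023-10-02", periods=6, freq=pd.Timedelta(hours=12))
ser = pd.Series(range(6), index=idx)

# closed: which bin edge is inclusive; label: which edge names the bin.
print(ser.resample("D", closed="left", label="left").sum())
print(ser.resample("D", closed="right", label="right").sum())
```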
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55309
2023-09-27T18:55:45Z
2023-10-06T16:14:33Z
2023-10-06T16:14:33Z
2023-10-06T16:55:40Z
ENH: case_when function
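The diff below proposes both a top-level ``pd.case_when`` and a ``Series.case_when`` method. As a quick orientation, here is a usage sketch of the API as proposed in this PR, adapted from the docstring the diff adds (the interface that ultimately shipped may differ):

```python
import pandas as pd

df = pd.DataFrame({"a": [0, 0, 1, 2], "b": [0, 3, 4, 5], "c": [6, 7, 8, 9]})

# Condition/replacement pairs; the first condition that is True wins,
# and `default` fills the rows where every condition is False.
out = pd.case_when(
    df.a.gt(0), df.a,   # condition, replacement
    df.b.gt(0), df.b,
    default=df.c,
)
print(out)  # 6, 3, 1, 2 (per the docstring example in the diff)
```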
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index efa14dd966eb1..4c7a7b329777b 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -138,7 +138,7 @@ jobs: run: echo "sdist_name=$(cd ./dist && ls -d */)" >> "$GITHUB_ENV" - name: Build wheels - uses: pypa/cibuildwheel@v2.16.1 + uses: pypa/cibuildwheel@v2.16.0 with: package-dir: ./dist/${{ matrix.buildplat[1] == 'macosx_*' && env.sdist_name || needs.build_sdist.outputs.sdist_file }} env: diff --git a/.gitignore b/.gitignore index cd22c2bb8cb5b..d936e1a5269bc 100644 --- a/.gitignore +++ b/.gitignore @@ -129,8 +129,10 @@ doc/source/index.rst doc/build/html/index.html # Windows specific leftover: doc/tmp.sv +doc/tmp.csv env/ doc/source/savefig/ +doc/source/_build # Interactive terminal generated files # ######################################## diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c911edfa03670..b0b511e1048c6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -84,7 +84,7 @@ repos: '--filter=-readability/casting,-runtime/int,-build/include_subdir,-readability/fn_size' ] - repo: https://github.com/pylint-dev/pylint - rev: v3.0.0b0 + rev: v3.0.0a7 hooks: - id: pylint stages: [manual] diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst index fe65364896f54..41ddbd048e6c5 100644 --- a/doc/source/reference/arrays.rst +++ b/doc/source/reference/arrays.rst @@ -134,6 +134,11 @@ is the missing value for datetime data. Timestamp +.. autosummary:: + :toctree: api/ + + NaT + Properties ~~~~~~~~~~ .. autosummary:: @@ -252,6 +257,11 @@ is the missing value for timedelta data. Timedelta +.. autosummary:: + :toctree: api/ + + NaT + Properties ~~~~~~~~~~ .. autosummary:: @@ -455,6 +465,7 @@ pandas provides this through :class:`arrays.IntegerArray`. UInt16Dtype UInt32Dtype UInt64Dtype + NA .. _api.arrays.float_na: @@ -473,6 +484,7 @@ Nullable float Float32Dtype Float64Dtype + NA .. _api.arrays.categorical: @@ -609,6 +621,7 @@ with a bool :class:`numpy.ndarray`. :template: autosummary/class_without_autosummary.rst BooleanDtype + NA .. Dtype attributes which are manually listed in their docstrings: including diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index 7da02f7958416..6d3ce3d31f005 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -53,7 +53,6 @@ are mentioned in the documentation. options extensions testing - missing_value .. This is to prevent warnings in the doc build. We don't want to encourage .. these methods. diff --git a/doc/source/reference/missing_value.rst b/doc/source/reference/missing_value.rst deleted file mode 100644 index 3bf22aef765d1..0000000000000 --- a/doc/source/reference/missing_value.rst +++ /dev/null @@ -1,24 +0,0 @@ -{{ header }} - -.. _api.missing_value: - -============== -Missing values -============== -.. currentmodule:: pandas - -NA is the way to represent missing values for nullable dtypes (see below): - -.. autosummary:: - :toctree: api/ - :template: autosummary/class_without_autosummary.rst - - NA - -NaT is the missing value for timedelta and datetime data (see below): - -.. 
autosummary:: - :toctree: api/ - :template: autosummary/class_without_autosummary.rst - - NaT diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 38ef8c8455b9d..bc6c26b27885e 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -15,7 +15,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.MonthBegin` (:issue:`55271`) - Fixed bug where PDEP-6 warning about setting an item of an incompatible dtype was being shown when creating a new conditional column (:issue:`55025`) -- Fixed regression in :meth:`DataFrame.join` where result has missing values and dtype is arrow backed string (:issue:`55348`) +- .. --------------------------------------------------------------------------- .. _whatsnew_212.bug_fixes: diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index fa3cef6d9457d..de529c9beaf8d 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -14,6 +14,27 @@ including other versions of pandas. Enhancements ~~~~~~~~~~~~ + +.. _whatsnew_220.enhancements.case_when: + +Create Series based on one or more conditions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :func:`case_when` function has been added to create a Series object based on one or more conditions. (:issue:`39154`) + +.. ipython:: python + + import pandas as pd + + df = pd.DataFrame(dict(a=[1, 2, 3], b=[4, 5, 6])) + df.assign( + new_column=pd.case_when( + df.a == 1, 'first', # condition, replacement + df.a.gt(1) & df.b.eq(5), 'second', + default='default', # optional + ) + ) + .. _whatsnew_220.enhancements.calamine: Calamine engine for :func:`read_excel` @@ -78,7 +99,6 @@ Other enhancements - :meth:`ExtensionArray._explode` interface method added to allow extension type implementations of the ``explode`` method (:issue:`54833`) - :meth:`ExtensionArray.duplicated` added to allow extension type implementations of the ``duplicated`` method (:issue:`55255`) - DataFrame.apply now allows the usage of numba (via ``engine="numba"``) to JIT compile the passed function, allowing for potential speedups (:issue:`54666`) -- Implement masked algorithms for :meth:`Series.value_counts` (:issue:`54984`) - .. --------------------------------------------------------------------------- @@ -220,7 +240,6 @@ Other Deprecations - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_parquet` except ``path``. (:issue:`54229`) - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_pickle` except ``path``. (:issue:`54229`) - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_string` except ``buf``. (:issue:`54229`) -- Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_xml` except ``path_or_buffer``. (:issue:`54229`) - Deprecated automatic downcasting of object-dtype results in :meth:`Series.replace` and :meth:`DataFrame.replace`, explicitly call ``result = result.infer_objects(copy=False)`` instead. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`54710`) - Deprecated downcasting behavior in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, :meth:`DataFrame.mask`, :meth:`Series.clip`, :meth:`DataFrame.clip`; in a future version these will not infer object-dtype columns to non-object dtype, or all-round floats to integer dtype. 
Call ``result.infer_objects(copy=False)`` on the result for object inference, or explicitly cast floats to ints. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`53656`) - Deprecated including the groups in computations when using :meth:`DataFrameGroupBy.apply` and :meth:`DataFrameGroupBy.resample`; pass ``include_groups=False`` to exclude the groups (:issue:`7155`) @@ -283,7 +302,7 @@ Numeric Conversion ^^^^^^^^^^ -- Bug in :meth:`Series.convert_dtypes` not converting all NA column to ``null[pyarrow]`` (:issue:`55346`) +- - Strings @@ -312,7 +331,7 @@ Missing MultiIndex ^^^^^^^^^^ -- Bug in :meth:`MultiIndex.get_indexer` not raising ``ValueError`` when ``method`` provided and index is non-monotonic (:issue:`53452`) +- - I/O diff --git a/pandas/__init__.py b/pandas/__init__.py index 41e34309232ee..18a462a3a08b6 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -73,6 +73,7 @@ notnull, # indexes Index, + case_when, CategoricalIndex, RangeIndex, MultiIndex, @@ -252,6 +253,7 @@ __all__ = [ "ArrowDtype", "BooleanDtype", + "case_when", "Categorical", "CategoricalDtype", "CategoricalIndex", diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi index 0ac914e86f699..2bc6d74fe6aee 100644 --- a/pandas/_libs/hashtable.pyi +++ b/pandas/_libs/hashtable.pyi @@ -240,7 +240,7 @@ def value_count( values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = ..., -) -> tuple[np.ndarray, npt.NDArray[np.int64], int]: ... # np.ndarray[same-as-values] +) -> tuple[np.ndarray, npt.NDArray[np.int64]]: ... # np.ndarray[same-as-values] # arr and values should have same dtype def ismember( diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in index 19acd4acbdee7..b9cf6011481af 100644 --- a/pandas/_libs/hashtable_func_helper.pxi.in +++ b/pandas/_libs/hashtable_func_helper.pxi.in @@ -36,7 +36,7 @@ cdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna, const uint8_t cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna, const uint8_t[:] mask=None): {{endif}} cdef: - Py_ssize_t i = 0, na_counter = 0, na_add = 0 + Py_ssize_t i = 0 Py_ssize_t n = len(values) kh_{{ttype}}_t *table @@ -49,6 +49,9 @@ cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna, const uint8 bint uses_mask = mask is not None bint isna_entry = False + if uses_mask and not dropna: + raise NotImplementedError("uses_mask not implemented with dropna=False") + # we track the order in which keys are first seen (GH39009), # khash-map isn't insertion-ordered, thus: # table maps keys to counts @@ -79,31 +82,25 @@ cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna, const uint8 for i in range(n): val = {{to_c_type}}(values[i]) - if uses_mask: - isna_entry = mask[i] - if dropna: - if not uses_mask: + if uses_mask: + isna_entry = mask[i] + else: isna_entry = is_nan_{{c_type}}(val) if not dropna or not isna_entry: - if uses_mask and isna_entry: - na_counter += 1 + k = kh_get_{{ttype}}(table, val) + if k != table.n_buckets: + table.vals[k] += 1 else: - k = kh_get_{{ttype}}(table, val) - if k != table.n_buckets: - table.vals[k] += 1 - else: - k = kh_put_{{ttype}}(table, val, &ret) - table.vals[k] = 1 - result_keys.append(val) + k = kh_put_{{ttype}}(table, val, &ret) + table.vals[k] = 1 + result_keys.append(val) {{endif}} # collect counts in the order corresponding to result_keys: - if na_counter > 0: - na_add = 1 cdef: - int64_t[::1] result_counts = np.empty(table.size + na_add, 
dtype=np.int64) + int64_t[::1] result_counts = np.empty(table.size, dtype=np.int64) for i in range(table.size): {{if dtype == 'object'}} @@ -113,13 +110,9 @@ cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna, const uint8 {{endif}} result_counts[i] = table.vals[k] - if na_counter > 0: - result_counts[table.size] = na_counter - result_keys.append(val) - kh_destroy_{{ttype}}(table) - return result_keys.to_array(), result_counts.base, na_counter + return result_keys.to_array(), result_counts.base @cython.wraparound(False) @@ -406,10 +399,10 @@ def mode(ndarray[htfunc_t] values, bint dropna, const uint8_t[:] mask=None): ndarray[htfunc_t] modes int64_t[::1] counts - int64_t count, _, max_count = -1 + int64_t count, max_count = -1 Py_ssize_t nkeys, k, j = 0 - keys, counts, _ = value_count(values, dropna, mask=mask) + keys, counts = value_count(values, dropna, mask=mask) nkeys = len(keys) modes = np.empty(nkeys, dtype=values.dtype) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 43252ffb5bf13..20a18cf56779f 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -455,18 +455,18 @@ cpdef array_to_datetime( set out_tzoffset_vals = set() tzinfo tz_out = None bint found_tz = False, found_naive = False - cnp.flatiter it = cnp.PyArray_IterNew(values) + cnp.broadcast mi # specify error conditions assert is_raise or is_ignore or is_coerce result = np.empty((<object>values).shape, dtype="M8[ns]") + mi = cnp.PyArray_MultiIterNew2(result, values) iresult = result.view("i8").ravel() for i in range(n): # Analogous to `val = values[i]` - val = cnp.PyArray_GETITEM(values, cnp.PyArray_ITER_DATA(it)) - cnp.PyArray_ITER_NEXT(it) + val = <object>(<PyObject**>cnp.PyArray_MultiIter_DATA(mi, 1))[0] try: if checknull_with_nat_and_na(val): @@ -511,6 +511,7 @@ cpdef array_to_datetime( if parse_today_now(val, &iresult[i], utc): # We can't _quite_ dispatch this to convert_str_to_tsobject # bc there isn't a nice way to pass "utc" + cnp.PyArray_MultiIter_NEXT(mi) continue _ts = convert_str_to_tsobject( @@ -539,10 +540,13 @@ cpdef array_to_datetime( else: raise TypeError(f"{type(val)} is not convertible to datetime") + cnp.PyArray_MultiIter_NEXT(mi) + except (TypeError, OverflowError, ValueError) as ex: ex.args = (f"{ex}, at position {i}",) if is_coerce: iresult[i] = NPY_NAT + cnp.PyArray_MultiIter_NEXT(mi) continue elif is_raise: raise diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 8fdba8992f627..74398eb0e2405 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -4329,6 +4329,28 @@ cdef class CustomBusinessHour(BusinessHour): cdef class _CustomBusinessMonth(BusinessMixin): + """ + DateOffset subclass representing custom business month(s). + + Increments between beginning/end of month dates. + + Parameters + ---------- + n : int, default 1 + The number of months represented. + normalize : bool, default False + Normalize start/end dates to midnight before generating date range. + weekmask : str, Default 'Mon Tue Wed Thu Fri' + Weekmask of valid business days, passed to ``numpy.busdaycalendar``. + holidays : list + List/array of dates to exclude from the set of valid business days, + passed to ``numpy.busdaycalendar``. + calendar : np.busdaycalendar + Calendar to integrate. + offset : timedelta, default timedelta(0) + Time offset to apply. 
+ """ + _attributes = tuple( ["n", "normalize", "weekmask", "holidays", "calendar", "offset"] ) @@ -4404,124 +4426,10 @@ cdef class _CustomBusinessMonth(BusinessMixin): cdef class CustomBusinessMonthEnd(_CustomBusinessMonth): - """ - DateOffset subclass representing custom business month(s). - - Increments between end of month dates. - - Parameters - ---------- - n : int, default 1 - The number of months represented. - normalize : bool, default False - Normalize end dates to midnight before generating date range. - weekmask : str, Default 'Mon Tue Wed Thu Fri' - Weekmask of valid business days, passed to ``numpy.busdaycalendar``. - holidays : list - List/array of dates to exclude from the set of valid business days, - passed to ``numpy.busdaycalendar``. - calendar : np.busdaycalendar - Calendar to integrate. - offset : timedelta, default timedelta(0) - Time offset to apply. - - See Also - -------- - :class:`~pandas.tseries.offsets.DateOffset` : Standard kind of date increment. - - Examples - -------- - In the example below we use the default parameters. - - >>> ts = pd.Timestamp(2022, 8, 5) - >>> ts + pd.offsets.CustomBusinessMonthEnd() - Timestamp('2022-08-31 00:00:00') - - Custom business month end can be specified by ``weekmask`` parameter. - To convert the returned datetime object to its string representation - the function strftime() is used in the next example. - - >>> import datetime as dt - >>> freq = pd.offsets.CustomBusinessMonthEnd(weekmask="Wed Thu") - >>> pd.date_range(dt.datetime(2022, 7, 10), dt.datetime(2022, 12, 18), - ... freq=freq).strftime('%a %d %b %Y %H:%M') - Index(['Thu 28 Jul 2022 00:00', 'Wed 31 Aug 2022 00:00', - 'Thu 29 Sep 2022 00:00', 'Thu 27 Oct 2022 00:00', - 'Wed 30 Nov 2022 00:00'], - dtype='object') - - Using NumPy business day calendar you can define custom holidays. - - >>> import datetime as dt - >>> bdc = np.busdaycalendar(holidays=['2022-08-01', '2022-09-30', - ... '2022-10-31', '2022-11-01']) - >>> freq = pd.offsets.CustomBusinessMonthEnd(calendar=bdc) - >>> pd.date_range(dt.datetime(2022, 7, 10), dt.datetime(2022, 11, 10), freq=freq) - DatetimeIndex(['2022-07-29', '2022-08-31', '2022-09-29', '2022-10-28'], - dtype='datetime64[ns]', freq='CBM') - """ - _prefix = "CBM" cdef class CustomBusinessMonthBegin(_CustomBusinessMonth): - """ - DateOffset subclass representing custom business month(s). - - Increments between beginning of month dates. - - Parameters - ---------- - n : int, default 1 - The number of months represented. - normalize : bool, default False - Normalize start dates to midnight before generating date range. - weekmask : str, Default 'Mon Tue Wed Thu Fri' - Weekmask of valid business days, passed to ``numpy.busdaycalendar``. - holidays : list - List/array of dates to exclude from the set of valid business days, - passed to ``numpy.busdaycalendar``. - calendar : np.busdaycalendar - Calendar to integrate. - offset : timedelta, default timedelta(0) - Time offset to apply. - - See Also - -------- - :class:`~pandas.tseries.offsets.DateOffset` : Standard kind of date increment. - - Examples - -------- - In the example below we use the default parameters. - - >>> ts = pd.Timestamp(2022, 8, 5) - >>> ts + pd.offsets.CustomBusinessMonthBegin() - Timestamp('2022-09-01 00:00:00') - - Custom business month start can be specified by ``weekmask`` parameter. - To convert the returned datetime object to its string representation - the function strftime() is used in the next example. 
- - >>> import datetime as dt - >>> freq = pd.offsets.CustomBusinessMonthBegin(weekmask="Wed Thu") - >>> pd.date_range(dt.datetime(2022, 7, 10), dt.datetime(2022, 12, 18), - ... freq=freq).strftime('%a %d %b %Y %H:%M') - Index(['Wed 03 Aug 2022 00:00', 'Thu 01 Sep 2022 00:00', - 'Wed 05 Oct 2022 00:00', 'Wed 02 Nov 2022 00:00', - 'Thu 01 Dec 2022 00:00'], - dtype='object') - - Using NumPy business day calendar you can define custom holidays. - - >>> import datetime as dt - >>> bdc = np.busdaycalendar(holidays=['2022-08-01', '2022-09-30', - ... '2022-10-31', '2022-11-01']) - >>> freq = pd.offsets.CustomBusinessMonthBegin(calendar=bdc) - >>> pd.date_range(dt.datetime(2022, 7, 10), dt.datetime(2022, 11, 10), freq=freq) - DatetimeIndex(['2022-08-02', '2022-09-01', '2022-10-03', '2022-11-02'], - dtype='datetime64[ns]', freq='CBMS') - """ - _prefix = "CBMS" diff --git a/pandas/_typing.py b/pandas/_typing.py index f18c67fcb0c90..0e2a0881f0122 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -4,7 +4,6 @@ Hashable, Iterator, Mapping, - MutableMapping, Sequence, ) from datetime import ( @@ -104,7 +103,6 @@ TypeGuard: Any = None HashableT = TypeVar("HashableT", bound=Hashable) -MutableMappingT = TypeVar("MutableMappingT", bound=MutableMapping) # array-like diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 4ff3de2fc7b2b..8c14d8c030ee3 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -923,7 +923,7 @@ def value_counts_internal( else: values = _ensure_arraylike(values, func_name="value_counts") - keys, counts, _ = value_counts_arraylike(values, dropna) + keys, counts = value_counts_arraylike(values, dropna) if keys.dtype == np.float16: keys = keys.astype(np.float32) @@ -948,7 +948,7 @@ def value_counts_internal( # Called once from SparseArray, otherwise could be private def value_counts_arraylike( values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None -) -> tuple[ArrayLike, npt.NDArray[np.int64], int]: +) -> tuple[ArrayLike, npt.NDArray[np.int64]]: """ Parameters ---------- @@ -964,7 +964,7 @@ def value_counts_arraylike( original = values values = _ensure_data(values) - keys, counts, na_counter = htable.value_count(values, dropna, mask=mask) + keys, counts = htable.value_count(values, dropna, mask=mask) if needs_i8_conversion(original.dtype): # datetime, timedelta, or period @@ -974,7 +974,7 @@ def value_counts_arraylike( keys, counts = keys[mask], counts[mask] res_keys = _reconstruct_data(keys, original.dtype, original) - return res_keys, counts, na_counter + return res_keys, counts def duplicated( diff --git a/pandas/core/api.py b/pandas/core/api.py index 2cfe5ffc0170d..f13ccdd98586f 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -42,6 +42,7 @@ UInt64Dtype, ) from pandas.core.arrays.string_ import StringDtype +from pandas.core.case_when import case_when from pandas.core.construction import array from pandas.core.flags import Flags from pandas.core.groupby import ( @@ -85,6 +86,7 @@ "ArrowDtype", "bdate_range", "BooleanDtype", + "case_when", "Categorical", "CategoricalDtype", "CategoricalIndex", diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 56d3711c7d13b..819a4370e5510 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -1052,22 +1052,28 @@ def value_counts(self, dropna: bool = True) -> Series: ) from pandas.arrays import IntegerArray - keys, value_counts, na_counter = algos.value_counts_arraylike( - self._data, dropna=dropna, mask=self._mask + keys, 
value_counts = algos.value_counts_arraylike( + self._data, dropna=True, mask=self._mask ) - mask_index = np.zeros((len(value_counts),), dtype=np.bool_) - mask = mask_index.copy() - if na_counter > 0: - mask_index[-1] = True + if dropna: + res = Series(value_counts, index=keys, name="count", copy=False) + res.index = res.index.astype(self.dtype) + res = res.astype("Int64") + return res - arr = IntegerArray(value_counts, mask) - index = Index( - self.dtype.construct_array_type()( - keys, mask_index # type: ignore[arg-type] - ) - ) - return Series(arr, index=index, name="count", copy=False) + # if we want nans, count the mask + counts = np.empty(len(value_counts) + 1, dtype="int64") + counts[:-1] = value_counts + counts[-1] = self._mask.sum() + + index = Index(keys, dtype=self.dtype).insert(len(keys), self.dtype.na_value) + index = index.astype(self.dtype) + + mask = np.zeros(len(counts), dtype="bool") + counts_array = IntegerArray(counts, mask) + + return Series(counts_array, index=index, name="count", copy=False) @doc(ExtensionArray.equals) def equals(self, other) -> bool: diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index cf349220e4ba7..608468da486b5 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -890,7 +890,7 @@ def value_counts(self, dropna: bool = True) -> Series: Series, ) - keys, counts, _ = algos.value_counts_arraylike(self.sp_values, dropna=dropna) + keys, counts = algos.value_counts_arraylike(self.sp_values, dropna=dropna) fcounts = self.sp_index.ngaps if fcounts > 0 and (not self._null_fill_value or not dropna): mask = isna(keys) if self._null_fill_value else keys == self.fill_value diff --git a/pandas/core/case_when.py b/pandas/core/case_when.py new file mode 100644 index 0000000000000..11e4a319f7b71 --- /dev/null +++ b/pandas/core/case_when.py @@ -0,0 +1,209 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.cast import ( + construct_1d_arraylike_from_scalar, + find_common_type, +) +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_scalar, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import na_value_for_dtype + +from pandas.core.common import convert_to_list_like +from pandas.core.construction import array as pd_array + +if TYPE_CHECKING: + from pandas._typing import ( + ListLike, + Scalar, + Series, + ) + + +def case_when( + *args: ListLike | Scalar, + default: Scalar | ListLike | None = None, + level: int | None = None, +) -> Series: + """ + Replace values where the conditions are True. + + Parameters + ---------- + *args : array-like, scalar + Variable argument of conditions and expected replacements. + Takes the form: `condition0`, `replacement0`, + `condition1`, `replacement1`, ... . + `condition` should be a 1-D boolean array. + The provided boolean conditions should have the same size. + `replacement` should be a 1-D array or a scalar. + If `replacement` is a 1-D array, it should have the same + shape as the paired `condition`. + When multiple boolean conditions are satisfied, + the first replacement is used. + + default : scalar, array-like, default None + If provided, it is the replacement value to use + if all conditions evaluate to False. + If default is a 1-D array, it should have the same shape as + `condition` and `replacement`. 
If not specified, + entries will be filled with the corresponding + NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension + dtypes). + + level : int, default None + Alignment level if needed. + + .. versionadded:: 2.2.0 + + Returns + ------- + Series + + See Also + -------- + Series.mask : Replace values where the condition is True. + + Examples + -------- + >>> df = pd.DataFrame({ + ... "a": [0,0,1,2], + ... "b": [0,3,4,5], + ... "c": [6,7,8,9] + ... }) + >>> df + a b c + 0 0 0 6 + 1 0 3 7 + 2 1 4 8 + 3 2 5 9 + + >>> pd.case_when(df.a.gt(0), df.a, # condition, replacement + ... df.b.gt(0), df.b, + ... default=df.c) # optional + 0 6 + 1 3 + 2 1 + 3 2 + Name: c, dtype: int64 + """ + from pandas import Series + + len_args = len(args) + if not len_args: + raise ValueError( + "Kindly provide at least one boolean condition, " + "with a corresponding replacement." + ) + if len_args % 2: + raise ValueError( + "The number of boolean conditions should be equal " + "to the number of replacements. " + "However, the total number of conditions and replacements " + f"is {len(args)}, which is an odd number." + ) + + counter = len_args // 2 - 1 + counter = range(counter, -1, -1) + conditions = [] + for num, condition in zip(counter, args[-2::-2]): + if not hasattr(condition, "shape"): + condition = np.asanyarray(condition) + if condition.ndim > 1: + raise ValueError(f"condition{num} is not a one dimensional array.") + if not is_bool_dtype(condition): + raise TypeError(f"condition{num} is not a boolean array.") + conditions.append(condition) + bool_length = {condition.size for condition in conditions} + if len(bool_length) > 1: + raise ValueError("All boolean conditions should have the same length.") + bool_length = conditions[0].size + if default is not None: + if is_scalar(default): + default = construct_1d_arraylike_from_scalar( + default, length=bool_length, dtype=None + ) + if not isinstance(default, ABCDataFrame): + default = convert_to_list_like(default) + if not hasattr(default, "shape"): + default = pd_array(default, copy=False) + if default.ndim > 1: + raise ValueError( + "The provided default argument should " + "either be a scalar or a 1-D array." + ) + if default.size != conditions[0].size: + raise ValueError( + "The length of the default argument should " + "be the same as the length of any " + "of the boolean conditions." + ) + replacements = [] + # ideally we could skip these checks and let `Series.mask` + # handle it - however, this step is necessary to get + # a common dtype for multiple conditions and replacements. + for num, replacement in zip(counter, args[-1::-2]): + if is_scalar(replacement): + replacement = construct_1d_arraylike_from_scalar( + replacement, length=bool_length, dtype=None + ) + if not isinstance(replacement, ABCDataFrame): + replacement = convert_to_list_like(replacement) + if not hasattr(replacement, "shape"): + replacement = pd_array(replacement, copy=False) + if replacement.ndim > 1: + raise ValueError(f"replacement{num} should be a 1-D array.") + if replacement.size != bool_length: + raise ValueError( + f"The size of replacement{num} array" + f"does not match the size of condition{num} array." 
+ ) + replacements.append(replacement) + common_dtype = [arr.dtype for arr in replacements] + if default is not None: + common_dtype.append(default.dtype) + if len(set(common_dtype)) > 1: + common_dtype = find_common_type(common_dtype) + replacements = [ + arr.astype(common_dtype, copy=False) + if isinstance(arr, ABCSeries) + else pd_array(arr, dtype=common_dtype, copy=False) + for arr in replacements + ] + if (default is not None) and isinstance(default, ABCSeries): + default = default.astype(common_dtype, copy=False) + elif default is not None: + default = pd_array(default, dtype=common_dtype, copy=False) + else: + common_dtype = common_dtype[0] + + if default is None: + default = construct_1d_arraylike_from_scalar( + na_value_for_dtype(common_dtype, compat=False), + length=bool_length, + dtype=common_dtype, + ) + default = Series(default) + elif not isinstance(default, ABCSeries): + default = Series(default) + + for position, condition, replacement in zip(counter, conditions, replacements): + try: + default = default.mask( + condition, other=replacement, axis=0, inplace=False, level=level + ) + except Exception as error: + raise ValueError( + f"condition{position} and replacement{position} failed to evaluate. " + f"Original error message: {error}" + ) from error + return default diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 3208a742738a3..74e785be06356 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1133,16 +1133,7 @@ def convert_dtypes( base_dtype = np.dtype(str) else: base_dtype = inferred_dtype - if ( - base_dtype.kind == "O" # type: ignore[union-attr] - and len(input_array) > 0 - and isna(input_array).all() - ): - import pyarrow as pa - - pa_type = pa.null() - else: - pa_type = to_pyarrow_type(base_dtype) + pa_type = to_pyarrow_type(base_dtype) if pa_type is not None: inferred_dtype = ArrowDtype(pa_type) elif dtype_backend == "numpy_nullable" and isinstance(inferred_dtype, ArrowDtype): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 51fceb1f09a62..432c0a745c7a0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -230,7 +230,6 @@ Level, MergeHow, MergeValidate, - MutableMappingT, NaAction, NaPosition, NsmallestNlargestKeep, @@ -1928,27 +1927,6 @@ def _create_data_for_split_and_tight_to_dict( def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., - *, - into: type[MutableMappingT] | MutableMappingT, - index: bool = ..., - ) -> MutableMappingT: - ... - - @overload - def to_dict( - self, - orient: Literal["records"], - *, - into: type[MutableMappingT] | MutableMappingT, - index: bool = ..., - ) -> list[MutableMappingT]: - ... - - @overload - def to_dict( - self, - orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., - *, into: type[dict] = ..., index: bool = ..., ) -> dict: @@ -1958,14 +1936,11 @@ def to_dict( def to_dict( self, orient: Literal["records"], - *, into: type[dict] = ..., index: bool = ..., ) -> list[dict]: ... 
- # error: Incompatible default for argument "into" (default has type "type - # [dict[Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT") @deprecate_nonkeyword_arguments( version="3.0", allowed_args=["self", "orient"], name="to_dict" ) @@ -1974,10 +1949,9 @@ def to_dict( orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", - into: type[MutableMappingT] - | MutableMappingT = dict, # type: ignore[assignment] + into: type[dict] = dict, index: bool = True, - ) -> MutableMappingT | list[MutableMappingT]: + ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. @@ -2005,7 +1979,7 @@ def to_dict( 'tight' as an allowed value for the ``orient`` argument into : class, default dict - The collections.abc.MutableMapping subclass used for all Mappings + The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. @@ -2019,10 +1993,9 @@ def to_dict( Returns ------- - dict, list or collections.abc.MutableMapping - Return a collections.abc.MutableMapping object representing the - DataFrame. The resulting transformation depends on the `orient` - parameter. + dict, list or collections.abc.Mapping + Return a collections.abc.Mapping object representing the DataFrame. + The resulting transformation depends on the `orient` parameter. See Also -------- @@ -2081,7 +2054,7 @@ def to_dict( """ from pandas.core.methods.to_dict import to_dict - return to_dict(self, orient, into=into, index=index) + return to_dict(self, orient, into, index) @deprecate_nonkeyword_arguments( version="3.0", allowed_args=["self", "destination_table"], name="to_gbq" @@ -3299,55 +3272,6 @@ def to_html( render_links=render_links, ) - @overload - def to_xml( - self, - path_or_buffer: None = ..., - *, - index: bool = ..., - root_name: str | None = ..., - row_name: str | None = ..., - na_rep: str | None = ..., - attr_cols: list[str] | None = ..., - elem_cols: list[str] | None = ..., - namespaces: dict[str | None, str] | None = ..., - prefix: str | None = ..., - encoding: str = ..., - xml_declaration: bool | None = ..., - pretty_print: bool | None = ..., - parser: XMLParsers | None = ..., - stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = ..., - compression: CompressionOptions = ..., - storage_options: StorageOptions | None = ..., - ) -> str: - ... - - @overload - def to_xml( - self, - path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str], - *, - index: bool = ..., - root_name: str | None = ..., - row_name: str | None = ..., - na_rep: str | None = ..., - attr_cols: list[str] | None = ..., - elem_cols: list[str] | None = ..., - namespaces: dict[str | None, str] | None = ..., - prefix: str | None = ..., - encoding: str = ..., - xml_declaration: bool | None = ..., - pretty_print: bool | None = ..., - parser: XMLParsers | None = ..., - stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = ..., - compression: CompressionOptions = ..., - storage_options: StorageOptions | None = ..., - ) -> None: - ... 
- - @deprecate_nonkeyword_arguments( - version="3.0", allowed_args=["self", "path_or_buffer"], name="to_xml" - ) @doc( storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 9017ff121976b..e23887159c9c6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4011,8 +4011,8 @@ def _get_fill_indexer( self, target: Index, method: str_t, limit: int | None = None, tolerance=None ) -> npt.NDArray[np.intp]: if self._is_multi: - if not (self.is_monotonic_increasing or self.is_monotonic_decreasing): - raise ValueError("index must be monotonic increasing or decreasing") + # TODO: get_indexer_with_fill docstring says values must be _sorted_ + # but that doesn't appear to be enforced # error: "IndexEngine" has no attribute "get_indexer_with_fill" engine = self._engine with warnings.catch_warnings(): diff --git a/pandas/core/methods/to_dict.py b/pandas/core/methods/to_dict.py index 3295c4741c03d..f4e0dcddcd34a 100644 --- a/pandas/core/methods/to_dict.py +++ b/pandas/core/methods/to_dict.py @@ -3,7 +3,6 @@ from typing import ( TYPE_CHECKING, Literal, - overload, ) import warnings @@ -17,66 +16,17 @@ from pandas.core import common as com if TYPE_CHECKING: - from pandas._typing import MutableMappingT - from pandas import DataFrame -@overload -def to_dict( - df: DataFrame, - orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., - *, - into: type[MutableMappingT] | MutableMappingT, - index: bool = ..., -) -> MutableMappingT: - ... - - -@overload -def to_dict( - df: DataFrame, - orient: Literal["records"], - *, - into: type[MutableMappingT] | MutableMappingT, - index: bool = ..., -) -> list[MutableMappingT]: - ... - - -@overload -def to_dict( - df: DataFrame, - orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., - *, - into: type[dict] = ..., - index: bool = ..., -) -> dict: - ... - - -@overload -def to_dict( - df: DataFrame, - orient: Literal["records"], - *, - into: type[dict] = ..., - index: bool = ..., -) -> list[dict]: - ... - - -# error: Incompatible default for argument "into" (default has type "type[dict -# [Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT") def to_dict( df: DataFrame, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", - *, - into: type[MutableMappingT] | MutableMappingT = dict, # type: ignore[assignment] + into: type[dict] = dict, index: bool = True, -) -> MutableMappingT | list[MutableMappingT]: +) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. @@ -104,7 +54,7 @@ def to_dict( 'tight' as an allowed value for the ``orient`` argument into : class, default dict - The collections.abc.MutableMapping subclass used for all Mappings + The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. @@ -119,8 +69,8 @@ def to_dict( Returns ------- dict, list or collections.abc.Mapping - Return a collections.abc.MutableMapping object representing the - DataFrame. The resulting transformation depends on the `orient` parameter. + Return a collections.abc.Mapping object representing the DataFrame. + The resulting transformation depends on the `orient` parameter. 
""" if not df.columns.is_unique: warnings.warn( @@ -153,7 +103,7 @@ def to_dict( are_all_object_dtype_cols = len(box_native_indices) == len(df.dtypes) if orient == "dict": - return into_c((k, v.to_dict(into=into)) for k, v in df.items()) + return into_c((k, v.to_dict(into)) for k, v in df.items()) elif orient == "list": object_dtype_indices_as_set: set[int] = set(box_native_indices) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 7d37a9f1d5113..e9b2bacd9e1df 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -986,7 +986,7 @@ def interpolate( downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. - .. deprecated:: 2.1.0 + .. deprecated::2.1.0 ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function. diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index ba6579a739f54..4b9fcc80af4bb 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -2443,8 +2443,6 @@ def _factorize_keys( .astype(np.intp, copy=False), len(dc.dictionary), ) - if dc.null_count > 0: - count += 1 if how == "right": return rlab, llab, count return llab, rlab, count diff --git a/pandas/core/series.py b/pandas/core/series.py index d5785a2171cb3..2228c07c19f5e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -48,7 +48,6 @@ from pandas.util._decorators import ( Appender, Substitution, - deprecate_nonkeyword_arguments, doc, ) from pandas.util._exceptions import find_stack_level @@ -100,6 +99,7 @@ from pandas.core.arrays.arrow import StructAccessor from pandas.core.arrays.categorical import CategoricalAccessor from pandas.core.arrays.sparse import SparseAccessor +from pandas.core.case_when import case_when from pandas.core.construction import ( extract_array, sanitize_array, @@ -168,7 +168,7 @@ IndexKeyFunc, IndexLabel, Level, - MutableMappingT, + ListLike, NaPosition, NumpySorter, NumpyValueArrayLike, @@ -1924,40 +1924,21 @@ def keys(self) -> Index: """ return self.index - @overload - def to_dict( - self, *, into: type[MutableMappingT] | MutableMappingT - ) -> MutableMappingT: - ... - - @overload - def to_dict(self, *, into: type[dict] = ...) -> dict: - ... - - # error: Incompatible default for argument "into" (default has type "type[ - # dict[Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT") - @deprecate_nonkeyword_arguments( - version="3.0", allowed_args=["self"], name="to_dict" - ) - def to_dict( - self, - into: type[MutableMappingT] - | MutableMappingT = dict, # type: ignore[assignment] - ) -> MutableMappingT: + def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict - The collections.abc.MutableMapping subclass to use as the return - object. Can be the actual class or an empty instance of the mapping - type you want. If you want a collections.defaultdict, you must - pass it initialized. + The collections.abc.Mapping subclass to use as the return + object. Can be the actual class or an empty + instance of the mapping type you want. If you want a + collections.defaultdict, you must pass it initialized. Returns ------- - collections.abc.MutableMapping + collections.abc.Mapping Key-value representation of Series. 
Examples @@ -1966,10 +1947,10 @@ def to_dict( >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict - >>> s.to_dict(into=OrderedDict) + >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) - >>> s.to_dict(into=dd) + >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 @@ -5462,6 +5443,70 @@ def between( return lmask & rmask + def case_when( + self, + *args: ListLike | Callable | Scalar, + level: int | None = None, + ) -> Series: + """ + Replace values where the conditions are True. + + Parameters + ---------- + *args : array-like, scalar + Variable argument of conditions and expected replacements. + Takes the form: `condition0`, `replacement0`, + `condition1`, `replacement1`, ... . + `condition` should be a 1-D boolean array-like object + or a callable. If `condition` is a callable, + it is computed on the Series + and should return a boolean Series or array. + The callable must not change the input Series + (though pandas doesn`t check it). `replacement` should be a + 1-D array-like object, a scalar or a callable. + If `replacement` is a callable, it is computed on the Series + and should return a scalar or Series. The callable + must not change the input Series + (though pandas doesn`t check it). + + level : int, default None + Alignment level if needed. + + .. versionadded:: 2.2.0 + + Returns + ------- + Series + + See Also + -------- + Series.mask : Replace values where the condition is True. + + Examples + -------- + >>> df = pd.DataFrame({ + ... "a": [0,0,1,2], + ... "b": [0,3,4,5], + ... "c": [6,7,8,9] + ... }) + >>> df + a b c + 0 0 0 6 + 1 0 3 7 + 2 1 4 8 + 3 2 5 9 + + >>> df.c.case_when(df.a.gt(0), df.a, # condition, replacement + ... df.b.gt(0), df.b) + 0 6 + 1 3 + 2 1 + 3 2 + Name: c, dtype: int64 + """ + args = [com.apply_if_callable(arg, self) for arg in args] + return case_when(*args, default=self, level=level) + # ---------------------------------------------------------------------- # Convert to types that support pd.NA diff --git a/pandas/io/xml.py b/pandas/io/xml.py index bd3b515dbca2f..918fe4d22ea62 100644 --- a/pandas/io/xml.py +++ b/pandas/io/xml.py @@ -88,7 +88,7 @@ class _XMLFrameParser: Parse only the attributes at the specified ``xpath``. names : list - Column names for :class:`~pandas.DataFrame` of parsed XML data. + Column names for :class:`~pandas.DataFrame`of parsed XML data. dtype : dict Data type for data or columns. E.g. 
{{'a': np.float64, diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 60bcb97aaa364..ae4fe5d56ebf6 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -106,6 +106,7 @@ class TestPDApi(Base): funcs = [ "array", "bdate_range", + "case_when", "concat", "crosstab", "cut", diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py index 3d21faf8b1729..2d4ac1d4a4444 100644 --- a/pandas/tests/frame/methods/test_join.py +++ b/pandas/tests/frame/methods/test_join.py @@ -158,14 +158,9 @@ def test_join_invalid_validate(left_no_dup, right_no_dup): left_no_dup.merge(right_no_dup, on="a", validate="invalid") -@pytest.mark.parametrize("dtype", ["object", "string[pyarrow]"]) -def test_join_on_single_col_dup_on_right(left_no_dup, right_w_dups, dtype): +def test_join_on_single_col_dup_on_right(left_no_dup, right_w_dups): # GH 46622 # Dups on right allowed by one_to_many constraint - if dtype == "string[pyarrow]": - pytest.importorskip("pyarrow") - left_no_dup = left_no_dup.astype(dtype) - right_w_dups.index = right_w_dups.index.astype(dtype) left_no_dup.join( right_w_dups, on="a", diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index d86692477f381..78b2c493ec116 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -342,19 +342,6 @@ def test_get_indexer_methods(self): expected = np.array([4, 6, 7], dtype=pad_indexer.dtype) tm.assert_almost_equal(expected, pad_indexer) - @pytest.mark.parametrize("method", ["pad", "ffill", "backfill", "bfill", "nearest"]) - def test_get_indexer_methods_raise_for_non_monotonic(self, method): - # 53452 - mi = MultiIndex.from_arrays([[0, 4, 2], [0, 4, 2]]) - if method == "nearest": - err = NotImplementedError - msg = "not implemented yet for MultiIndex" - else: - err = ValueError - msg = "index must be monotonic increasing or decreasing" - with pytest.raises(err, match=msg): - mi.get_indexer([(1, 1)], method=method) - def test_get_indexer_three_or_more_levels(self): # https://github.com/pandas-dev/pandas/issues/29896 # tests get_indexer() on MultiIndexes with 3+ levels diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py index 2c8f4c4149528..b78e6426ca17f 100644 --- a/pandas/tests/libs/test_hashtable.py +++ b/pandas/tests/libs/test_hashtable.py @@ -586,26 +586,15 @@ def test_value_count(self, dtype, writable): expected = (np.arange(N) + N).astype(dtype) values = np.repeat(expected, 5) values.flags.writeable = writable - keys, counts, _ = ht.value_count(values, False) + keys, counts = ht.value_count(values, False) tm.assert_numpy_array_equal(np.sort(keys), expected) assert np.all(counts == 5) - def test_value_count_mask(self, dtype): - if dtype == np.object_: - pytest.skip("mask not implemented for object dtype") - values = np.array([1] * 5, dtype=dtype) - mask = np.zeros((5,), dtype=np.bool_) - mask[1] = True - mask[4] = True - keys, counts, na_counter = ht.value_count(values, False, mask=mask) - assert len(keys) == 2 - assert na_counter == 2 - def test_value_count_stable(self, dtype, writable): # GH12679 values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype) values.flags.writeable = writable - keys, counts, _ = ht.value_count(values, False) + keys, counts = ht.value_count(values, False) tm.assert_numpy_array_equal(keys, values) assert np.all(counts == 1) @@ -696,9 +685,9 @@ def test_unique_label_indices(): class TestHelpFunctionsWithNans: def 
test_value_count(self, dtype): values = np.array([np.nan, np.nan, np.nan], dtype=dtype) - keys, counts, _ = ht.value_count(values, True) + keys, counts = ht.value_count(values, True) assert len(keys) == 0 - keys, counts, _ = ht.value_count(values, False) + keys, counts = ht.value_count(values, False) assert len(keys) == 1 and np.all(np.isnan(keys)) assert counts[0] == 3 diff --git a/pandas/tests/series/methods/test_convert_dtypes.py b/pandas/tests/series/methods/test_convert_dtypes.py index f621604faae4b..d1c79d0f00365 100644 --- a/pandas/tests/series/methods/test_convert_dtypes.py +++ b/pandas/tests/series/methods/test_convert_dtypes.py @@ -265,11 +265,3 @@ def test_convert_dtypes_pyarrow_to_np_nullable(self): result = ser.convert_dtypes(dtype_backend="numpy_nullable") expected = pd.Series(range(2), dtype="Int32") tm.assert_series_equal(result, expected) - - def test_convert_dtypes_pyarrow_null(self): - # GH#55346 - pa = pytest.importorskip("pyarrow") - ser = pd.Series([None, None]) - result = ser.convert_dtypes(dtype_backend="pyarrow") - expected = pd.Series([None, None], dtype=pd.ArrowDtype(pa.null())) - tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/methods/test_to_dict.py b/pandas/tests/series/methods/test_to_dict.py index 41c01f4537f23..4c3d9592eebe3 100644 --- a/pandas/tests/series/methods/test_to_dict.py +++ b/pandas/tests/series/methods/test_to_dict.py @@ -13,12 +13,12 @@ class TestSeriesToDict: ) def test_to_dict(self, mapping, datetime_series): # GH#16122 - result = Series(datetime_series.to_dict(into=mapping), name="ts") + result = Series(datetime_series.to_dict(mapping), name="ts") expected = datetime_series.copy() expected.index = expected.index._with_freq(None) tm.assert_series_equal(result, expected) - from_method = Series(datetime_series.to_dict(into=collections.Counter)) + from_method = Series(datetime_series.to_dict(collections.Counter)) from_constructor = Series(collections.Counter(datetime_series.items())) tm.assert_series_equal(from_method, from_constructor) diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py index bde9902fec6e9..f54489ac8a8b4 100644 --- a/pandas/tests/series/methods/test_value_counts.py +++ b/pandas/tests/series/methods/test_value_counts.py @@ -250,22 +250,3 @@ def test_value_counts_complex_numbers(self, input_array, expected): # GH 17927 result = Series(input_array).value_counts() tm.assert_series_equal(result, expected) - - def test_value_counts_masked(self): - # GH#54984 - dtype = "Int64" - ser = Series([1, 2, None, 2, None, 3], dtype=dtype) - result = ser.value_counts(dropna=False) - expected = Series( - [2, 2, 1, 1], - index=Index([2, None, 1, 3], dtype=dtype), - dtype=dtype, - name="count", - ) - tm.assert_series_equal(result, expected) - - result = ser.value_counts(dropna=True) - expected = Series( - [2, 1, 1], index=Index([2, 1, 3], dtype=dtype), dtype=dtype, name="count" - ) - tm.assert_series_equal(result, expected) diff --git a/pandas/tests/test_case_when.py b/pandas/tests/test_case_when.py new file mode 100644 index 0000000000000..743a0012bf305 --- /dev/null +++ b/pandas/tests/test_case_when.py @@ -0,0 +1,155 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, + case_when, +) +import pandas._testing as tm + + +@pytest.fixture +def df(): + """ + base dataframe for testing + """ + return DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + +# use fixture and parametrize +def test_case_when_no_args(): + """ + Raise ValueError 
if no args is provided. + """ + msg = "Kindly provide at least one boolean condition, " + msg += "with a corresponding replacement." + with pytest.raises(ValueError, match=msg): + case_when() + + +def test_case_when_odd_args(df): + """ + Raise ValueError if no of args is odd. + """ + msg = "The number of boolean conditions should be equal " + msg += "to the number of replacements. " + msg += "However, the total number of conditions and replacements " + msg += "is 3, which is an odd number." + with pytest.raises(ValueError, match=msg): + case_when(df["a"].eq(1), 1, df.a.gt(1)) + + +def test_case_when_boolean_ndim(df): + """ + Raise ValueError if boolean array ndim is greater than 1. + """ + with pytest.raises(ValueError, match="condition0 is not a one dimensional array."): + case_when(df, 2) + + +def test_case_when_not_boolean(df): + """ + Raise TypeError if condition is not a boolean array. + """ + with pytest.raises(TypeError, match="condition1 is not a boolean array."): + case_when(df["a"].eq(1), 1, df["a"], 2) + + +def test_case_when_multiple_bool_lengths(df): + """ + Raise ValueError if the boolean conditions do not have the same length. + """ + with pytest.raises( + ValueError, match="All boolean conditions should have the same length." + ): + case_when(df["a"].eq(1), 1, [True, False], 2) + + +def test_case_when_default_ndim(df): + """ + Raise ValueError if the default is not a 1D array. + """ + msg = "The provided default argument should " + msg += "either be a scalar or a 1-D array." + with pytest.raises(ValueError, match=msg): + case_when(df["a"].eq(1), 1, default=df) + + +def test_case_when_default_length(df): + """ + Raise ValueError if the default is not + the same length as any of the boolean conditions. + """ + msg = "The length of the default argument should " + msg += "be the same as the length of any " + msg += "of the boolean conditions." + with pytest.raises(ValueError, match=msg): + case_when(df["a"].eq(1), 1, default=[2]) + + +def test_case_when_replacement_ndim(df): + """ + Raise ValueError if the ndim of the replacement value is greater than 1. + """ + with pytest.raises(ValueError, match="replacement0 should be a 1-D array."): + case_when(df["a"].eq(1), df) + + +def test_case_when_replacement_length(df): + """ + Raise ValueError if the replacement size is not the same as the boolean array. + """ + msg = "The size of replacement0 array" + msg += "does not match the size of condition0 array." 
+ with pytest.raises(ValueError, match=msg): + case_when(df["a"].eq(1), np.array([2])) + + +def test_case_when_multiple_conditions(df): + """ + Test output when booleans are derived from a computation + """ + result = case_when(df.a.eq(1), 1, Series([False, True, False]), 2) + expected = Series([1, 2, np.nan]) + tm.assert_series_equal(result, expected) + + +def test_case_when_multiple_conditions_replacement_list(df): + """ + Test output when replacement is a list + """ + result = case_when( + [True, False, False], 1, df["a"].gt(1) & df["b"].eq(5), [1, 2, 3] + ) + expected = Series([1, 2, np.nan], dtype="Int64") + tm.assert_series_equal(result, expected) + + +def test_case_when_multiple_conditions_replacement_series(df): + """ + Test output when replacement is a Series + """ + result = case_when( + np.array([True, False, False]), + 1, + df["a"].gt(1) & df["b"].eq(5), + Series([1, 2, 3]), + ) + expected = Series([1, 2, np.nan]) + tm.assert_series_equal(result, expected) + + +def test_case_when_multiple_conditions_default_is_not_none(df): + """ + Test output when default is not None + """ + result = case_when( + [True, False, False], + 1, + df["a"].gt(1) & df["b"].eq(5), + Series([1, 2, 3]), + default=-1, + ) + expected = Series([1, 2, -1]) + tm.assert_series_equal(result, expected) diff --git a/web/pandas/pdeps/0012-compact-and-reversible-JSON-interface.md b/web/pandas/pdeps/0012-compact-and-reversible-JSON-interface.md deleted file mode 100644 index 4fe4b935f144b..0000000000000 --- a/web/pandas/pdeps/0012-compact-and-reversible-JSON-interface.md +++ /dev/null @@ -1,437 +0,0 @@ -# PDEP-12: Compact and reversible JSON interface - -- Created: 16 June 2023 -- Status: Rejected -- Discussion: - [#53252](https://github.com/pandas-dev/pandas/issues/53252) - [#55038](https://github.com/pandas-dev/pandas/issues/55038) -- Author: [Philippe THOMY](https://github.com/loco-philippe) -- Revision: 3 - - -#### Summary -- [Abstract](./0012-compact-and-reversible-JSON-interface.md/#Abstract) - - [Problem description](./0012-compact-and-reversible-JSON-interface.md/#Problem-description) - - [Feature Description](./0012-compact-and-reversible-JSON-interface.md/#Feature-Description) -- [Scope](./0012-compact-and-reversible-JSON-interface.md/#Scope) -- [Motivation](./0012-compact-and-reversible-JSON-interface.md/#Motivation) - - [Why is it important to have a compact and reversible JSON interface ?](./0012-compact-and-reversible-JSON-interface.md/#Why-is-it-important-to-have-a-compact-and-reversible-JSON-interface-?) - - [Is it relevant to take an extended type into account ?](./0012-compact-and-reversible-JSON-interface.md/#Is-it-relevant-to-take-an-extended-type-into-account-?) - - [Is this only useful for pandas ?](./0012-compact-and-reversible-JSON-interface.md/#Is-this-only-useful-for-pandas-?) 
-- [Description](./0012-compact-and-reversible-JSON-interface.md/#Description)
-    - [Data typing](./0012-compact-and-reversible-JSON-interface.md/#Data-typing)
-    - [Correspondence between TableSchema and pandas](./0012-compact-and-reversible-JSON-interface.md/#Correspondence-between-TableSchema-and-pandas)
-    - [JSON format](./0012-compact-and-reversible-JSON-interface.md/#JSON-format)
-    - [Conversion](./0012-compact-and-reversible-JSON-interface.md/#Conversion)
-- [Usage and impact](./0012-compact-and-reversible-JSON-interface.md/#Usage-and-impact)
-    - [Usage](./0012-compact-and-reversible-JSON-interface.md/#Usage)
-    - [Compatibility](./0012-compact-and-reversible-JSON-interface.md/#Compatibility)
-    - [Impacts on the pandas framework](./0012-compact-and-reversible-JSON-interface.md/#Impacts-on-the-pandas-framework)
-    - [Risk to do / risk not to do](./0012-compact-and-reversible-JSON-interface.md/#Risk-to-do-/-risk-not-to-do)
-- [Implementation](./0012-compact-and-reversible-JSON-interface.md/#Implementation)
-    - [Modules](./0012-compact-and-reversible-JSON-interface.md/#Modules)
-    - [Implementation options](./0012-compact-and-reversible-JSON-interface.md/#Implementation-options)
-- [F.A.Q.](./0012-compact-and-reversible-JSON-interface.md/#F.A.Q.)
-- [Synthesis](./0012-compact-and-reversible-JSON-interface.md/#Synthesis)
-- [Core team decision](./0012-compact-and-reversible-JSON-interface.md/#Core-team-decision)
-- [Timeline](./0012-compact-and-reversible-JSON-interface.md/#Timeline)
-- [PDEP history](./0012-compact-and-reversible-JSON-interface.md/#PDEP-history)
--------------------------
-## Abstract
-
-### Problem description
-The `dtype` and "Python type" are not explicitly taken into account in the current JSON interface.
-
-So, the JSON interface is not always reversible and has inconsistencies related to the consideration of the `dtype`.
-
-Another consequence is the partial application of the Table Schema specification in the `orient="table"` option (6 Table Schema data types are taken into account out of the 24 defined).
-
-Some JSON-interface problems are detailed in the [linked NoteBook](https://nbviewer.org/github/loco-philippe/ntv-pandas/blob/main/example/example_json_pandas.ipynb#Current-Json-interface)
-
-
-### Feature Description
-To have a simple, compact and reversible solution, I propose to use the [JSON-NTV format (Named and Typed Value)](https://github.com/loco-philippe/NTV#readme) - which integrates the notion of type - and its JSON-TAB variation for tabular data (the JSON-NTV format is defined in an [IETF Internet-Draft](https://datatracker.ietf.org/doc/draft-thomy-json-ntv/), not yet an RFC).
-
-This solution makes it possible to include a large number of types (not necessarily pandas `dtype`), which allows:
-- a Table Schema JSON interface (`orient="table"`) which respects the Table Schema specification (going from 6 types to 20 types),
-- a global JSON interface for all pandas data formats.
-
-#### Global JSON interface example
-In the example below, a DataFrame with several data types is converted to JSON.
-
-The DataFrame resulting from this JSON is identical to the initial DataFrame (reversibility).
-
-With the existing JSON interface, this conversion is not possible.
-
-This example uses the `ntv_pandas` module defined in the [ntv-pandas repository](https://github.com/loco-philippe/ntv-pandas#readme).
- -*data example* -```python -In [1]: from shapely.geometry import Point - from datetime import date - import pandas as pd - import ntv_pandas as npd - -In [2]: data = {'index': [100, 200, 300, 400, 500, 600], - 'dates::date': [date(1964,1,1), date(1985,2,5), date(2022,1,21), date(1964,1,1), date(1985,2,5), date(2022,1,21)], - 'value': [10, 10, 20, 20, 30, 30], - 'value32': pd.Series([12, 12, 22, 22, 32, 32], dtype='int32'), - 'res': [10, 20, 30, 10, 20, 30], - 'coord::point': [Point(1,2), Point(3,4), Point(5,6), Point(7,8), Point(3,4), Point(5,6)], - 'names': pd.Series(['john', 'eric', 'judith', 'mila', 'hector', 'maria'], dtype='string'), - 'unique': True } - -In [3]: df = pd.DataFrame(data).set_index('index') - -In [4]: df -Out[4]: dates::date value value32 res coord::point names unique - index - 100 1964-01-01 10 12 10 POINT (1 2) john True - 200 1985-02-05 10 12 20 POINT (3 4) eric True - 300 2022-01-21 20 22 30 POINT (5 6) judith True - 400 1964-01-01 20 22 10 POINT (7 8) mila True - 500 1985-02-05 30 32 20 POINT (3 4) hector True - 600 2022-01-21 30 32 30 POINT (5 6) maria True -``` - -*JSON representation* - -```python -In [5]: df_to_json = npd.to_json(df) - pprint(df_to_json, width=120) -Out[5]: {':tab': {'coord::point': [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [3.0, 4.0], [5.0, 6.0]], - 'dates::date': ['1964-01-01', '1985-02-05', '2022-01-21', '1964-01-01', '1985-02-05', '2022-01-21'], - 'index': [100, 200, 300, 400, 500, 600], - 'names::string': ['john', 'eric', 'judith', 'mila', 'hector', 'maria'], - 'res': [10, 20, 30, 10, 20, 30], - 'unique': [True, True, True, True, True, True], - 'value': [10, 10, 20, 20, 30, 30], - 'value32::int32': [12, 12, 22, 22, 32, 32]}} -``` - -*Reversibility* - -```python -In [5]: df_from_json = npd.read_json(df_to_json) - print('df created from JSON is equal to initial df ? ', df_from_json.equals(df)) -Out[5]: df created from JSON is equal to initial df ? True -``` -Several other examples are provided in the [linked NoteBook](https://nbviewer.org/github/loco-philippe/ntv-pandas/blob/main/example/example_ntv_pandas.ipynb) - -#### Table Schema JSON interface example -In the example below, a DataFrame with several Table Schema data types is converted to JSON. - -The DataFrame resulting from this JSON is identical to the initial DataFrame (reversibility). - -With the existing Table Schema JSON interface, this conversion is not possible. 
- -```python -In [1]: from shapely.geometry import Point - from datetime import date - -In [2]: df = pd.DataFrame({ - 'end february::date': ['date(2023,2,28)', 'date(2024,2,29)', 'date(2025,2,28)'], - 'coordinates::point': ['Point([2.3, 48.9])', 'Point([5.4, 43.3])', 'Point([4.9, 45.8])'], - 'contact::email': ['john.doe@table.com', 'lisa.minelli@schema.com', 'walter.white@breaking.com'] - }) - -In [3]: df -Out[3]: end february::date coordinates::point contact::email - 0 2023-02-28 POINT (2.3 48.9) john.doe@table.com - 1 2024-02-29 POINT (5.4 43.3) lisa.minelli@schema.com - 2 2025-02-28 POINT (4.9 45.8) walter.white@breaking.com -``` - -*JSON representation* - -```python -In [4]: df_to_table = npd.to_json(df, table=True) - pprint(df_to_table, width=140, sort_dicts=False) -Out[4]: {'schema': {'fields': [{'name': 'index', 'type': 'integer'}, - {'name': 'end february', 'type': 'date'}, - {'name': 'coordinates', 'type': 'geopoint', 'format': 'array'}, - {'name': 'contact', 'type': 'string', 'format': 'email'}], - 'primaryKey': ['index'], - 'pandas_version': '1.4.0'}, - 'data': [{'index': 0, 'end february': '2023-02-28', 'coordinates': [2.3, 48.9], 'contact': 'john.doe@table.com'}, - {'index': 1, 'end february': '2024-02-29', 'coordinates': [5.4, 43.3], 'contact': 'lisa.minelli@schema.com'}, - {'index': 2, 'end february': '2025-02-28', 'coordinates': [4.9, 45.8], 'contact': 'walter.white@breaking.com'}]} -``` - -*Reversibility* - -```python -In [5]: df_from_table = npd.read_json(df_to_table) - print('df created from JSON is equal to initial df ? ', df_from_table.equals(df)) -Out[5]: df created from JSON is equal to initial df ? True -``` -Several other examples are provided in the [linked NoteBook](https://nbviewer.org/github/loco-philippe/ntv-pandas/blob/main/example/example_table_pandas.ipynb) - -## Scope -The objective is to make available the proposed JSON interface for any type of data and for `orient="table"` option or a new option `orient="ntv"`. - -The proposed interface is compatible with existing data. - -## Motivation - -### Why extend the `orient=table` option to other data types? -- The Table Schema specification defines 24 data types, 6 are taken into account in the pandas interface - -### Why is it important to have a compact and reversible JSON interface ? -- a reversible interface provides an exchange format. -- a textual exchange format facilitates exchanges between platforms (e.g. OpenData) -- a JSON exchange format can be used at API level - -### Is it relevant to take an extended type into account ? -- it avoids the addition of an additional data schema -- it increases the semantic scope of the data processed by pandas -- it is an answer to several issues (e.g. #12997, #14358, #16492, #35420, #35464, #36211, #39537, #49585, #50782, #51375, #52595, #53252) -- the use of a complementary type avoids having to modify the pandas data model - -### Is this only useful for pandas ? -- the JSON-TAB format is applicable to tabular data and multi-dimensional data. -- this JSON interface can therefore be used for any application using tabular or multi-dimensional data. This would allow for example reversible data exchanges between pandas - DataFrame and Xarray - DataArray (Xarray issue under construction) [see example DataFrame / DataArray](https://nbviewer.org/github/loco-philippe/NTV/blob/main/example/example_pandas.ipynb#Multidimensional-data). 
-
-## Description
-
-The proposed solution is based on several key points:
-- data typing
-- correspondence between TableSchema and pandas
-- JSON format for tabular data
-- conversion to and from JSON format
-
-### Data typing
-Data types are defined and managed in the NTV project (name, JSON encoder and decoder).
-
-pandas `dtype`s are compatible with NTV types:
-
-| **pandas dtype**   | **NTV type** |
-|--------------------|--------------|
-| intxx              | intxx        |
-| uintxx             | uintxx       |
-| floatxx            | floatxx      |
-| datetime[ns]       | datetime     |
-| datetime[ns, <tz>] | datetimetz   |
-| timedelta[ns]      | durationiso  |
-| string             | string       |
-| boolean            | boolean      |
-
-Note:
-- datetime with timezone is a single NTV type (string ISO8601)
-- `CategoricalDtype` and `SparseDtype` are included in the tabular JSON format
-- the `object` `dtype` depends on the context (see below)
-- `PeriodDtype` and `IntervalDtype` are to be defined
-
-JSON types (implicit or explicit) are converted to `dtype` following the pandas JSON interface:
-
-| **JSON type**  | **pandas dtype**  |
-|----------------|-------------------|
-| number         | int64 / float64   |
-| string         | string / object   |
-| array          | object            |
-| object         | object            |
-| true, false    | boolean           |
-| null           | NaT / NaN / None  |
-
-Note:
-- if an NTV type is defined, the `dtype` is adjusted accordingly
-- the consideration of null type data needs to be clarified
-
-The other NTV types are associated with the `object` `dtype`.
-
-### Correspondence between TableSchema and pandas
-The TableSchema typing is carried by two attributes, `format` and `type`.
-
-The table below shows the correspondence between TableSchema format / type and pandas NTV type / dtype:
-
-| **format / type**  | **NTV type / dtype** |
-|--------------------|----------------------|
-| default / datetime | / datetime64[ns]     |
-| default / number   | / float64            |
-| default / integer  | / int64              |
-| default / boolean  | / bool               |
-| default / string   | / object             |
-| default / duration | / timedelta64[ns]    |
-| email / string     | email / string       |
-| uri / string       | uri / string         |
-| default / object   | object / object      |
-| default / array    | array / object       |
-| default / date     | date / object        |
-| default / time     | time / object        |
-| default / year     | year / int64         |
-| default / yearmonth| month / int64        |
-| array / geopoint   | point / object       |
-| default / geojson  | geojson / object     |
-
-Note:
-- other TableSchema formats are defined and are to be studied (uuid, binary, topojson, specific formats for geopoint and datation)
-- the first six lines correspond to the existing interface
-
-### JSON format
-The JSON format for the TableSchema interface is the existing one.
-
-The JSON format for the Global interface is defined in the [JSON-TAB](https://github.com/loco-philippe/NTV/blob/main/documentation/JSON-TAB-standard.pdf) specification.
-It includes the naming rules originally defined in the [JSON-ND project](https://github.com/glenkleidon/JSON-ND) and support for categorical data.
-The specification has to be updated to include sparse data.
-
-### Conversion
-When data is associated with a non-`object` `dtype`, pandas conversion methods are used.
-Otherwise, NTV conversion is used.
-
-#### pandas -> JSON
-- `NTV type` is not defined: use `to_json()`
-- `NTV type` is defined and `dtype` is not `object`: use `to_json()`
-- `NTV type` is defined and `dtype` is `object`: use NTV conversion (if pandas conversion does not exist)
-
-#### JSON -> pandas
-- `NTV type` is compatible with a `dtype`: use `read_json()`
-- `NTV type` is not compatible with a `dtype`: use NTV conversion (if pandas conversion does not exist)
-
-## Usage and Impact
-
-### Usage
-It seems to me that this proposal responds to important issues:
-- having an efficient text format for data exchange
-
-    The alternative CSV format is not reversible and is obsolete (last revision in 2005). Current CSV tools do not comply with the standard.
-
-- taking into account "semantic" data in pandas objects
-
-- having a complete Table Schema interface
-
-### Compatibility
-The interface can be used without NTV types (compatibility with existing data - [see examples](https://nbviewer.org/github/loco-philippe/ntv-pandas/blob/main/example/example_ntv_pandas.ipynb#Appendix-:-Series-tests))
-
-If the interface is made available through a new `orient` option in the JSON interface, the use of the feature is decoupled from the other features.
-
-### Impacts on the pandas framework
-Initially, the impacts are very limited:
-- modification of the `name` of `Series` or `DataFrame columns` (no functional impact),
-- adding an option in the Json interface (e.g. `orient='ntv'`) and the associated methods (no functional interference with the other methods)
-
-In later stages, several developments could be considered:
-- validation of the `name` of `Series` or `DataFrame columns`,
-- management of the NTV type as a "complementary-object-dtype"
-- functional extensions depending on the NTV type
-
-### Risk to do / risk not to do
-The JSON-NTV format and the JSON-TAB format are not (yet) widely recognized and used formats. The risk for pandas is that this function is not used (no functional impacts).
-
-On the other hand, early use by pandas will allow better consideration of the expectations and needs of pandas, as well as a reflection on the evolution of the types supported by pandas.
-
-## Implementation
-
-### Modules
-Two modules are defined for NTV:
-
-- json-ntv
-
-    this module manages NTV data without dependencies on other modules
-
-- ntvconnector
-
-    these modules manage the conversion between objects and JSON data. They depend on the object modules (e.g. connectors for shapely locations depend on shapely).
-
-The pandas integration of the JSON interface requires importing only the json-ntv module.
-
-### Implementation options
-The interface can be implemented as NTV connectors (`SeriesConnector` and `DataFrameConnector`) and as a new pandas JSON interface `orient` option.
-
-Several pandas implementations are possible:
-
-1. External:
-
-    In this implementation, the interface is available only on the NTV side.
-    This option means that this evolution of the JSON interface is not useful or strategic for pandas.
-
-2. NTV side:
-
-    In this implementation, the interface is available on both sides and the conversion is located inside NTV.
-    This option is the one that minimizes the impacts on the pandas side.
-
-3. pandas side:
-
-    In this implementation, the interface is available on both sides and the conversion is located inside pandas.
-    This option allows pandas to keep control of this evolution.
-
-4. pandas restricted:
-
-    In this implementation, the pandas interface and the conversion are located inside pandas, and only for non-`object` `dtype`.
-    This option makes it possible to offer a compact and reversible interface while prohibiting the introduction of types incompatible with the existing `dtype`.
-
-## F.A.Q.
-
-**Q: Does `orient="table"` not do what you are proposing already?**
-
-**A**: In principle, yes, this option takes into account the notion of type.
-
-But this is very limited (see examples added in the [Notebook](https://nbviewer.org/github/loco-philippe/NTV/blob/main/example/example_pandas.ipynb)):
-- **Types and Json interface**
-    - the only way to keep the types in the json interface is to use the `orient='table'` option
-    - a few dtypes are not allowed in the json-table interface: period, timedelta64, interval
-    - allowed types are not always kept in the json-table interface
-    - data with 'object' dtype is kept only if the data is string
-    - with categorical dtype, the underlying dtype is not included in the json interface
-- **Data compactness**
-    - the json-table interface is not compact (in the example in the [Notebook](https://nbviewer.org/github/loco-philippe/NTV/blob/main/example/example_pandas.ipynb#data-compactness)) the size is triple or quadruple the size of the compact format
-- **Reversibility**
-    - the interface is reversible only with a few dtypes: int64, float64, bool, string, datetime64 and partially categorical
-- **External types**
-    - the interface does not accept external types
-    - Table-schema defines 20 data types but the `orient="table"` interface takes into account 5 data types (see [table](https://nbviewer.org/github/loco-philippe/NTV/blob/main/example/example_pandas.ipynb#Converting-table-schema-type-to-pandas-dtype))
-    - to integrate external types, it is necessary to first create ExtensionArray and ExtensionDtype objects
-
-The current interface is not compatible with the data structure defined by table-schema. For this to be possible, it is necessary to integrate a "type extension" like the one proposed (this has moreover been partially achieved with the notion of `extDtype` found in the interface for several formats).
-
-**Q: In general, we should only have 1 `"table"` format for pandas in read_json/to_json. There is also the issue of backwards compatibility if we do change the format. The fact that the table interface is buggy is not a reason to add a new interface (I'd rather fix those bugs). Can the existing format be adapted in a way that fixes the type issues/issues with roundtripping?**
-
-**A**: I will add two additional remarks:
-- the types defined in Tableschema are only partially taken into account (examples of types not taken into account in the interface: string-uri, array, date, time, year, geopoint, string-email)
-- the `read_json()` interface also works with the following data: `{'simple': [1,2,3] }` (contrary to what is indicated in the documentation), but it is impossible with `to_json()` to recreate this simple json.
-
-I think that the problem cannot be limited to bug fixes and that a clear strategy must be defined for the Json interface, in particular with the gradual abandonment in open-data solutions of the obsolete CSV format in favor of a Json format.
-
-As stated, the proposed solution addresses several shortcomings of the current interface and could simply fit into the pandas environment (the other option would be to consider that the Json interface is a peripheral function of pandas and can remain external to pandas), regardless of the `orient='table'` option.
-
-It is nevertheless possible to merge the proposed format and the `orient='table'` format in order to have explicit management of the notion of `extDtype`.
-
-**Q: As far as I can tell, JSON NTV is not in any form a standardised JSON format. I believe that pandas (and geopandas, which is where I came from to this issue) should try to follow either de facto or de jure standards and do not opt in for a file format that does not have any community support at this moment. This can obviously change in the future and that is where this PR should be revised. Why would pandas use this standard?**
-
-**A**: As indicated in the issue (and detailed in [the attached Notebook](https://nbviewer.org/github/loco-philippe/NTV/blob/main/example/example_pandas.ipynb)), the json interface is not reversible (`to_json` then `read_json` does not always return the initial object) and several shortcomings and bugs are present. The main cause of this problem is that the data type is not taken into account in the JSON format (or only very partially with the `orient='table'` option).
-
-The proposal made answers this problem ([the example at the beginning of the Notebook](https://nbviewer.org/github/loco-philippe/NTV/blob/main/example/example_pandas.ipynb#0---Simple-example) simply and clearly illustrates the interest of the proposal).
-
-Regarding the underlying JSON-NTV format, its impact is quite low for tabular data (it is limited to adding the type in the field name).
-Nevertheless, the question is relevant: the JSON-NTV format ([IETF Internet-Draft](https://datatracker.ietf.org/doc/draft-thomy-json-ntv/)) is a shared, documented, supported and implemented format, but the community support is indeed still limited for the moment; it only asks to expand!
-
-## Synthesis
-To conclude,
-- if it is important (or strategic) to have a reversible JSON interface for any type of data, the proposal can be accepted,
-- if not, a third-party package listed in the [ecosystem](https://pandas.pydata.org/community/ecosystem.html) that reads/writes this format to/from pandas DataFrames should be considered
-
-## Core team decision
-The vote was open from September 11 to September 26:
-- Final tally is 0 approvals, 5 abstentions, 7 disapprovals. The quorum has been met. The PDEP fails.
-
-**Disapprove comments**:
-- 1 Given the newness of the proposed JSON NTV format, I would support (as described in the PDEP): "if not, a third-party package listed in the ecosystem that reads/writes this format to/from pandas DataFrames should be considered"
-- 2 Same reason as -1-, this should be a third party package for now
-- 3 Not mature enough, and not clear what the market size would be.
-- 4 for the same reason I left in the PDEP: "I think this (JSON-NTV format) does not meet the bar of being a commonly used format for implementation within pandas"
-- 5 agree with -4-
-- 6 agree with the other core-dev responders. I think work in the existing json interface is extremely valuable. A number of the original issues raised are just bug fixes / extensions of already existing functionality. Trying to start anew is likely not worth the migration effort.
That said, if a format is well supported in the community we can reconsider in the future (obviously json is well supported, but the actual specification detailed here is too new / not accepted as a standard)
-- 7 while I do think having a more comprehensive JSON format would be worthwhile, making a new format part of pandas means an implicit endorsement of a standard that is still being reviewed by the broader community.
-
-**Decision**:
-- add the `ntv-pandas` package to the [ecosystem](https://pandas.pydata.org/community/ecosystem.html)
-- revisit this PDEP at a later stage, for example in 1/2 to 1 year (based on the evolution of the Internet draft [JSON semantic format (JSON-NTV)](https://www.ietf.org/archive/id/draft-thomy-json-ntv-01.html) and the usage of [ntv-pandas](https://github.com/loco-philippe/ntv-pandas#readme))
-
-## Timeline
-Not applicable
-
-## PDEP History
-
-- 16 June 2023: Initial draft
-- 22 July 2023: Add F.A.Q.
-- 06 September 2023: Add Table Schema extension
-- 01 October 2023: Add Core team decision
- [x] closes #39154
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

Continues the work started by @ElHoussineT (#50343). Uses `pd.Series.mask` under the hood, and adds both a function implementation and a Series method.
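A minimal usage sketch, based on the tests in this diff. Note that the tests call the bare `case_when` name; the import path below is an assumption, and the final shipped API may differ:

```python
import pandas as pd
from pandas import case_when  # import path is an assumption; the tests only show the bare name

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# Conditions and replacements alternate as positional arguments; rows that
# match no condition become NaN unless `default` is supplied.
result = case_when(
    df["a"].eq(1), "one",
    df["a"].gt(1) & df["b"].eq(5), "a > 1 and b == 5",
    default="no match",
)
# -> Series(["one", "a > 1 and b == 5", "no match"])
```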
https://api.github.com/repos/pandas-dev/pandas/pulls/55306
2023-09-27T13:49:27Z
2023-10-04T01:58:41Z
null
2023-10-04T14:47:59Z
Valid capitalization errors #32550
diff --git a/doc/source/development/contributing_docstring.rst b/doc/source/development/contributing_docstring.rst index 87aecb6936c9c..cf9ef51e0d8dc 100644 --- a/doc/source/development/contributing_docstring.rst +++ b/doc/source/development/contributing_docstring.rst @@ -646,7 +646,7 @@ A simple example could be: pandas.Series Subset of the original series with the n first values. - See Also + See also -------- tail : Return the last n elements of the Series. diff --git a/doc/source/development/contributing_documentation.rst b/doc/source/development/contributing_documentation.rst index 964f82be4fa7b..443470e6c50f9 100644 --- a/doc/source/development/contributing_documentation.rst +++ b/doc/source/development/contributing_documentation.rst @@ -14,7 +14,7 @@ experts. If something in the docs doesn't make sense to you, updating the relevant section after you figure it out is a great way to ensure it will help the next person. Please visit the `issues page <https://github.com/pandas-dev/pandas/issues?page=1&q=is%3Aopen+sort%3Aupdated-desc+label%3ADocs>`__ for a full list of issues that are currently open regarding the -Pandas documentation. +pandas documentation.
- [x] Fix capitalization at [Fix capitalization among headings in documentation files #32550](https://github.com/pandas-dev/pandas/issues/32550) - [x] https://github.com/pandas-dev/pandas/blob/main/doc/source/development/contributing_docstring.rst - [x] https://github.com/pandas-dev/pandas/blob/main/doc/source/development/contributing_documentation.rst
https://api.github.com/repos/pandas-dev/pandas/pulls/55304
2023-09-27T03:10:33Z
2023-10-22T20:05:04Z
null
2023-10-22T20:05:09Z
Backport PR #55300 on branch 2.1.x (TST: xfail test due to new numexpr version)
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 878e94c15e16b..670d4d4f554a6 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -22,14 +22,12 @@ ) import pandas._testing as tm from pandas.core.computation import expressions as expr -from pandas.core.computation.expressions import ( - _MIN_ELEMENTS, - NUMEXPR_INSTALLED, -) +from pandas.core.computation.expressions import _MIN_ELEMENTS from pandas.tests.frame.common import ( _check_mixed_float, _check_mixed_int, ) +from pandas.util.version import Version @pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) @@ -502,10 +500,19 @@ def test_floordiv_axis0(self): result2 = df.floordiv(ser.values, axis=0) tm.assert_frame_equal(result2, expected) - @pytest.mark.skipif(not NUMEXPR_INSTALLED, reason="numexpr not installed") @pytest.mark.parametrize("opname", ["floordiv", "pow"]) - def test_floordiv_axis0_numexpr_path(self, opname): + def test_floordiv_axis0_numexpr_path(self, opname, request): # case that goes through numexpr and has to fall back to masked_arith_op + ne = pytest.importorskip("numexpr") + if ( + Version(ne.__version__) >= Version("2.8.7") + and opname == "pow" + and "python" in request.node.callspec.id + ): + request.node.add_marker( + pytest.mark.xfail(reason="https://github.com/pydata/numexpr/issues/454") + ) + op = getattr(operator, opname) arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
Backport PR #55300: TST: xfail test due to new numexpr version
https://api.github.com/repos/pandas-dev/pandas/pulls/55303
2023-09-26T21:04:05Z
2023-09-26T23:08:49Z
2023-09-26T23:08:49Z
2023-09-26T23:08:49Z
TST: xfail test due to new numexpr version
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 24a70e55e2f0e..bb9a76829c77d 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -22,14 +22,12 @@ ) import pandas._testing as tm from pandas.core.computation import expressions as expr -from pandas.core.computation.expressions import ( - _MIN_ELEMENTS, - NUMEXPR_INSTALLED, -) +from pandas.core.computation.expressions import _MIN_ELEMENTS from pandas.tests.frame.common import ( _check_mixed_float, _check_mixed_int, ) +from pandas.util.version import Version @pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) @@ -501,10 +499,19 @@ def test_floordiv_axis0(self): result2 = df.floordiv(ser.values, axis=0) tm.assert_frame_equal(result2, expected) - @pytest.mark.skipif(not NUMEXPR_INSTALLED, reason="numexpr not installed") @pytest.mark.parametrize("opname", ["floordiv", "pow"]) - def test_floordiv_axis0_numexpr_path(self, opname): + def test_floordiv_axis0_numexpr_path(self, opname, request): # case that goes through numexpr and has to fall back to masked_arith_op + ne = pytest.importorskip("numexpr") + if ( + Version(ne.__version__) >= Version("2.8.7") + and opname == "pow" + and "python" in request.node.callspec.id + ): + request.node.add_marker( + pytest.mark.xfail(reason="https://github.com/pydata/numexpr/issues/454") + ) + op = getattr(operator, opname) arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
null
https://api.github.com/repos/pandas-dev/pandas/pulls/55300
2023-09-26T18:07:06Z
2023-09-26T21:02:55Z
2023-09-26T21:02:55Z
2023-09-26T21:02:58Z
ExtensionArray.interpolate() method and tests
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index f3bb7323c7d5f..f4f9a4d27fbdb 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -890,7 +890,6 @@ def interpolate( limit, limit_direction, limit_area, - fill_value, copy: bool, **kwargs, ) -> Self: @@ -904,7 +903,6 @@ def interpolate( ... limit=3, ... limit_direction="forward", ... index=pd.Index([1, 2, 3, 4]), - ... fill_value=1, ... copy=False, ... axis=0, ... limit_area="inside" @@ -914,9 +912,24 @@ def interpolate( Length: 4, dtype: float64 """ # NB: we return type(self) even if copy=False - raise NotImplementedError( - f"{type(self).__name__} does not implement interpolate" + + if not self.dtype._is_numeric or self.dtype._is_boolean: + raise TypeError( + f"Cannot interpolate {type(self)} dtype as it is non-numeric or boolean" + ) + data = self.to_numpy('float64', copy=copy) + missing.interpolate_2d_inplace( + data = data, + axis = axis, + index = index, + method = method, + limit = limit, + limit_direction = limit_direction, + limit_area = limit_area, + **kwargs, ) + return self._from_sequence(data, dtype=self.dtype) + def _pad_or_backfill( self, *, method: FillnaOptions, limit: int | None = None, copy: bool = True diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index 4e0bc8d804bab..11e2405912925 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -704,3 +704,23 @@ def test_equals(self, data, as_series, box): def test_equals_same_data_different_object(self, data): # https://github.com/pandas-dev/pandas/issues/34660 assert pd.Series(data).equals(pd.Series(data)) + + @pytest.mark.parametrize("method", ['linear']) + def test_interpolate(self, data_for_sorting, method): + data = data_for_sorting + if not data.dtype._is_numeric or data.dtype._is_boolean: + pytest.skip("Interpolate is only valid for numeric non-boolean dtypes") + + ser = pd.Series(data) + result = ser.interpolate(method) + + values = np.array(data, dtype=np.float64) + pd.core.missing.interpolate_2d_inplace( + data = np.array(values), + axis = 0, + index = ser.index, + method = method + ) + expected = pd.Series(values, dtype=data.dtype) + + tm.assert_series_equal(result, expected) \ No newline at end of file diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 9ce7ac309b6d3..d2486e6662e56 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -70,6 +70,8 @@ class DecimalArray(OpsMixin, ExtensionScalarOpsMixin, ExtensionArray): __array_priority__ = 1000 def __init__(self, values, dtype=None, copy=False, context=None) -> None: + # Cast float np arrays to obj before converting to Decimal + values = np.asarray(values, dtype=object) for i, val in enumerate(values): if is_float(val) or is_integer(val): if np.isnan(val): @@ -81,7 +83,6 @@ def __init__(self, values, dtype=None, copy=False, context=None) -> None: values[i] = DecimalDtype.type(val) # type: ignore[arg-type] elif not isinstance(val, decimal.Decimal): raise TypeError("All values must be of type " + str(decimal.Decimal)) - values = np.asarray(values, dtype=object) self._data = values # Some aliases for common attribute names to ensure pandas supports
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

Adds a default interpolate method for numeric EAs. Partially fixes #40252 by removing fill_value from the interpolate signature. This doesn't fully close the issue, as I haven't implemented interpolate for integers: should interpolating an IntegerArray return an IntegerArray or a FloatArray?

Is this the right way to go about testing interpolate? I couldn't think of a simpler way to do it.

This works for Series(EA).interpolate(), but falls over for DataFrame(EA).interpolate(), as axis=1 is passed.
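A sketch of the behaviour this implies, assuming the patch above is applied (which dtypes raise versus round-trip follows the `_is_numeric` / `_is_boolean` checks in the diff):

```python
import pandas as pd

# Numeric EAs get default linear interpolation; the float64 working buffer
# is cast back to the original dtype via _from_sequence.
ser = pd.Series([1.0, None, 3.0], dtype="Float64")
ser.interpolate()  # -> [1.0, 2.0, 3.0], dtype stays Float64

# Boolean and other non-numeric EAs raise instead:
pd.Series([True, None, False], dtype="boolean").interpolate()  # TypeError

# The open question above in practice: Int64 only round-trips when the
# interpolated values happen to be whole, e.g. [1, <NA>, 3] -> 2, while
# [1, <NA>, 4] would need 2.5 and cannot be cast back to Int64 losslessly.
```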
https://api.github.com/repos/pandas-dev/pandas/pulls/55297
2023-09-26T12:22:54Z
2023-11-07T01:07:32Z
null
2023-11-07T01:07:33Z
Backport PR #55275 on branch 2.1.x (Update pyproject.toml - replace `output_formatting` with `output-formatting`)
diff --git a/.github/workflows/package-checks.yml b/.github/workflows/package-checks.yml index 04abcf4ce8816..7d20ef92587d9 100644 --- a/.github/workflows/package-checks.yml +++ b/.github/workflows/package-checks.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - extra: ["test", "performance", "computation", "fss", "aws", "gcp", "excel", "parquet", "feather", "hdf5", "spss", "postgresql", "mysql", "sql-other", "html", "xml", "plot", "output_formatting", "clipboard", "compression", "consortium-standard", "all"] + extra: ["test", "performance", "computation", "fss", "aws", "gcp", "excel", "parquet", "feather", "hdf5", "spss", "postgresql", "mysql", "sql-other", "html", "xml", "plot", "output-formatting", "clipboard", "compression", "consortium-standard", "all"] fail-fast: false name: Install Extras - ${{ matrix.extra }} concurrency: diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index ae7c9d4ea9c62..7570f9f33d265 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -247,14 +247,14 @@ Dependency Minimum Version pip ext Visualization ^^^^^^^^^^^^^ -Installable with ``pip install "pandas[plot, output_formatting]"``. +Installable with ``pip install "pandas[plot, output-formatting]"``. ========================= ================== ================== ============================================================= Dependency Minimum Version pip extra Notes ========================= ================== ================== ============================================================= matplotlib 3.6.1 plot Plotting library -Jinja2 3.1.2 output_formatting Conditional formatting with DataFrame.style -tabulate 0.8.10 output_formatting Printing in Markdown-friendly format (see `tabulate`_) +Jinja2 3.1.2 output-formatting Conditional formatting with DataFrame.style +tabulate 0.8.10 output-formatting Printing in Markdown-friendly format (see `tabulate`_) ========================= ================== ================== ============================================================= Computation diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 97aeb56924e65..6fec66ec8d556 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -29,7 +29,7 @@ Bug fixes Other ~~~~~ -- +- Fixed non-working installation of optional dependency group ``output_formatting``. Replacing underscore ``_`` with a dash ``-`` fixes broken dependency resolution. A correct way to use now is ``pip install pandas[output-formatting]``. - .. --------------------------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index 477c9be6274b3..3cfdadc268160 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -80,7 +80,7 @@ sql-other = ['SQLAlchemy>=1.4.36'] html = ['beautifulsoup4>=4.11.1', 'html5lib>=1.1', 'lxml>=4.8.0'] xml = ['lxml>=4.8.0'] plot = ['matplotlib>=3.6.1'] -output_formatting = ['jinja2>=3.1.2', 'tabulate>=0.8.10'] +output-formatting = ['jinja2>=3.1.2', 'tabulate>=0.8.10'] clipboard = ['PyQt5>=5.15.6', 'qtpy>=2.2.0'] compression = ['zstandard>=0.17.0'] consortium-standard = ['dataframe-api-compat>=0.1.7']
Backport PR #55275: Update pyproject.toml - replace `output_formatting` with `output-formatting`
https://api.github.com/repos/pandas-dev/pandas/pulls/55291
2023-09-26T00:12:26Z
2023-09-26T05:11:35Z
2023-09-26T05:11:35Z
2023-09-26T05:11:35Z
Backport PR #55276 on branch 2.1.x (Bump pypa/cibuildwheel from 2.15.0 to 2.16.0)
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 97d78a1a9afe3..ecb30fefb9ff2 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -138,7 +138,7 @@ jobs: run: echo "sdist_name=$(cd ./dist && ls -d */)" >> "$GITHUB_ENV" - name: Build wheels - uses: pypa/cibuildwheel@v2.15.0 + uses: pypa/cibuildwheel@v2.16.0 with: package-dir: ./dist/${{ matrix.buildplat[1] == 'macosx_*' && env.sdist_name || needs.build_sdist.outputs.sdist_file }} env:
Backport PR #55276: Bump pypa/cibuildwheel from 2.15.0 to 2.16.0
https://api.github.com/repos/pandas-dev/pandas/pulls/55288
2023-09-25T18:09:59Z
2023-09-25T20:23:54Z
2023-09-25T20:23:54Z
2023-09-25T20:23:54Z
BUG: df.resample('MS', closed='right') incorrectly places bins
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 6fec66ec8d556..1a25b848e0f84 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -13,6 +13,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ +- Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.MonthBegin` (:issue:`55271`) - Fixed bug where PDEP-6 warning about setting an item of an incompatible dtype was being shown when creating a new conditional column (:issue:`55025`) - @@ -21,7 +22,8 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- +- Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`) +- Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 30d654078bd05..fa30e35e36925 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -2297,7 +2297,17 @@ def _adjust_bin_edges( ) -> tuple[DatetimeIndex, npt.NDArray[np.int64]]: # Some hacks for > daily data, see #1471, #1458, #1483 - if self.freq != "D" and is_superperiod(self.freq, "D"): + if self.freq.name in ("BM", "ME", "W") or self.freq.name.split("-")[0] in ( + "BQ", + "BA", + "Q", + "A", + "W", + ): + # If the right end-point is on the last day of the month, roll forwards + # until the last moment of that day. Note that we only do this for offsets + # which correspond to the end of a super-daily period - "month start", for + # example, is excluded. if self.closed == "right": # GH 21459, GH 9119: Adjust the bins relative to the wall time edges_dti = binner.tz_localize(None) diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index d929dfa6e1e59..113e2d8986ad2 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -660,7 +660,7 @@ def test_resample_reresample(unit): s = Series(np.random.default_rng(2).random(len(dti)), dti) bs = s.resample("B", closed="right", label="right").mean() result = bs.resample("8H").mean() - assert len(result) == 22 + assert len(result) == 25 assert isinstance(result.index.freq, offsets.DateOffset) assert result.index.freq == offsets.Hour(8) @@ -2051,3 +2051,66 @@ def test_resample_M_deprecated(): with tm.assert_produces_warning(UserWarning, match=depr_msg): result = s.resample("2M").mean() tm.assert_series_equal(result, expected) + + +def test_resample_ms_closed_right(): + # https://github.com/pandas-dev/pandas/issues/55271 + dti = date_range(start="2020-01-31", freq="1min", periods=6000) + df = DataFrame({"ts": dti}, index=dti) + grouped = df.resample("MS", closed="right") + result = grouped.last() + expected = DataFrame( + {"ts": [datetime(2020, 2, 1), datetime(2020, 2, 4, 3, 59)]}, + index=DatetimeIndex([datetime(2020, 1, 1), datetime(2020, 2, 1)], freq="MS"), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("freq", ["B", "C"]) +def test_resample_c_b_closed_right(freq: str): + # https://github.com/pandas-dev/pandas/issues/55281 + dti = date_range(start="2020-01-31", freq="1min", periods=6000) + df = DataFrame({"ts": dti}, index=dti) + grouped = df.resample(freq, closed="right") + result = grouped.last() + expected = DataFrame( + { + "ts": 
[ + datetime(2020, 1, 31), + datetime(2020, 2, 3), + datetime(2020, 2, 4), + datetime(2020, 2, 4, 3, 59), + ] + }, + index=DatetimeIndex( + [ + datetime(2020, 1, 30), + datetime(2020, 1, 31), + datetime(2020, 2, 3), + datetime(2020, 2, 4), + ], + freq=freq, + ), + ) + tm.assert_frame_equal(result, expected) + + +def test_resample_b_55282(): + # https://github.com/pandas-dev/pandas/issues/55282 + s = Series( + [1, 2, 3, 4, 5, 6], index=date_range("2023-09-26", periods=6, freq="12H") + ) + result = s.resample("B", closed="right", label="right").mean() + expected = Series( + [1.0, 2.5, 4.5, 6.0], + index=DatetimeIndex( + [ + datetime(2023, 9, 26), + datetime(2023, 9, 27), + datetime(2023, 9, 28), + datetime(2023, 9, 29), + ], + freq="B", + ), + ) + tm.assert_series_equal(result, expected)
- [x] closes #55271
- [x] closes #55281
- [x] closes #55282
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

~~todo: extra tests for 'B' with closed='right' (linked issue below), whatsnew~~

This fixes a regression from 2.1.0, but it also happens to fix two older bugs as a side effect, so I've put it in the 2.1.2 milestone.
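A quick reproduction of the `MS` case, taken directly from the new `test_resample_ms_closed_right` in this diff:

```python
import pandas as pd

dti = pd.date_range(start="2020-01-31", freq="1min", periods=6000)
df = pd.DataFrame({"ts": dti}, index=dti)

# Before the fix, closed="right" shifted the "MS" bin edges to the last
# moment of the day; with the fix, the labels are the month starts.
df.resample("MS", closed="right").last()
# expected index: DatetimeIndex([2020-01-01, 2020-02-01], freq="MS")
```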
https://api.github.com/repos/pandas-dev/pandas/pulls/55283
2023-09-25T15:01:06Z
2023-09-26T15:57:41Z
2023-09-26T15:57:41Z
2023-10-05T12:04:25Z
Add remove_from_default_na options to read_csv, read_excel...
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index acf35ebd6afe5..27c1c840eb12f 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -211,6 +211,9 @@ + fill('", "'.join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ") + """ ". +remove_from_default_na : Hashable or Iterable of Hashable, optional + Remvoe values from the default ``NaN`` values when parsing the data. + keep_default_na : bool, default True Whether or not to include the default ``NaN`` values when parsing the data. Depending on whether ``na_values`` is passed in, the behavior is as follows: @@ -718,6 +721,9 @@ def read_csv( | Iterable[Hashable] | Mapping[Hashable, Iterable[Hashable]] | None = ..., + remove_from_default_na: Hashable + | Iterable[Hashable] + | None = ..., keep_default_na: bool = ..., na_filter: bool = ..., verbose: bool = ..., @@ -781,6 +787,9 @@ def read_csv( | Iterable[Hashable] | Mapping[Hashable, Iterable[Hashable]] | None = ..., + remove_from_default_na: Hashable + | Iterable[Hashable] + | None = ..., keep_default_na: bool = ..., na_filter: bool = ..., verbose: bool = ..., @@ -844,6 +853,9 @@ def read_csv( | Iterable[Hashable] | Mapping[Hashable, Iterable[Hashable]] | None = ..., + remove_from_default_na: Hashable + | Iterable[Hashable] + | None = ..., keep_default_na: bool = ..., na_filter: bool = ..., verbose: bool = ..., @@ -920,6 +932,9 @@ def read_csv( | Iterable[Hashable] | Mapping[Hashable, Iterable[Hashable]] | None = None, + remove_from_default_na: Hashable + | Iterable[Hashable] + | None = ..., keep_default_na: bool = True, na_filter: bool = True, verbose: bool = False, @@ -1013,6 +1028,7 @@ def read_table( skipfooter: int = ..., nrows: int | None = ..., na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + remove_from_default_na: Sequence[str] | None = ..., keep_default_na: bool = ..., na_filter: bool = ..., verbose: bool = ..., @@ -1073,6 +1089,7 @@ def read_table( skipfooter: int = ..., nrows: int | None = ..., na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + remove_from_default_na: Sequence[str] | None = ..., keep_default_na: bool = ..., na_filter: bool = ..., verbose: bool = ..., @@ -1133,6 +1150,7 @@ def read_table( skipfooter: int = ..., nrows: int | None = ..., na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + remove_from_default_na: Sequence[str] | None = ..., keep_default_na: bool = ..., na_filter: bool = ..., verbose: bool = ..., @@ -1193,6 +1211,7 @@ def read_table( skipfooter: int = ..., nrows: int | None = ..., na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + remove_from_default_na: Sequence[str] | None = ..., keep_default_na: bool = ..., na_filter: bool = ..., verbose: bool = ..., @@ -1268,6 +1287,7 @@ def read_table( nrows: int | None = None, # NA and Missing Data Handling na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + remove_from_default_na: Sequence[str] | None = ..., keep_default_na: bool = True, na_filter: bool = True, verbose: bool = False, @@ -1740,7 +1760,10 @@ def _clean_options( # Converting values to NA keep_default_na = options["keep_default_na"] - na_values, na_fvalues = _clean_na_values(na_values, keep_default_na) + remove_from_default_na = options["remove_from_default_na"] + na_values, na_fvalues = _clean_na_values( + na_values, remove_from_default_na, keep_default_na + ) # handle skiprows; this is internally handled by the # c-engine, so only need for python and pyarrow parsers @@ -1916,6 +1939,8 @@ def 
TextParser(*args, **kwds) -> TextFileReader: not in the header. na_values : scalar, str, list-like, or dict, optional Additional strings to recognize as NA/NaN. + remove_from_default_na : scalar, str, list-like, or dict, optional + Strings not to recognize as NA/NaN. keep_default_na : bool, default True thousands : str, optional Thousands separator @@ -1952,11 +1977,17 @@ def TextParser(*args, **kwds) -> TextFileReader: return TextFileReader(*args, **kwds) -def _clean_na_values(na_values, keep_default_na: bool = True): +def _clean_na_values(na_values, remove_from_default_na, keep_default_na: bool = True): na_fvalues: set | dict + if remove_from_default_na is None: + remove_from_default_na = set() + elif not is_list_like(remove_from_default_na): + remove_from_default_na = set([remove_from_default_na]) + else: + remove_from_default_na = set(remove_from_default_na) if na_values is None: if keep_default_na: - na_values = STR_NA_VALUES + na_values = STR_NA_VALUES - remove_from_default_na else: na_values = set() na_fvalues = set() @@ -1973,7 +2004,7 @@ def _clean_na_values(na_values, keep_default_na: bool = True): v = [v] if keep_default_na: - v = set(v) | STR_NA_VALUES + v = set(v) | (STR_NA_VALUES - remove_from_default_na) na_values[k] = v na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()} @@ -1982,7 +2013,7 @@ def _clean_na_values(na_values, keep_default_na: bool = True): na_values = [na_values] na_values = _stringify_na_values(na_values) if keep_default_na: - na_values = na_values | STR_NA_VALUES + na_values = na_values | (STR_NA_VALUES - remove_from_default_na) na_fvalues = _floatify_na_values(na_values) diff --git a/pandas/tests/io/parser/test_na_values.py b/pandas/tests/io/parser/test_na_values.py index 9a16ec5a50d36..9828e55c66520 100644 --- a/pandas/tests/io/parser/test_na_values.py +++ b/pandas/tests/io/parser/test_na_values.py @@ -277,6 +277,26 @@ def test_na_value_dict_multi_index(all_parsers, index_col, expected): } ), ), + ( + {"remove_from_default_na": ["nan"]}, + DataFrame( + { + "A": ["a", "b", np.nan, "d", "e", "nan", "g"], + "B": [1, 2, 3, 4, 5, 6, 7], + "C": ["one", "two", "three", "nan", "five", np.nan, "seven"], + } + ), + ), + ( + {"na_values": ["nan"], "remove_from_default_na": ["nan"]}, + DataFrame( + { + "A": ["a", "b", np.nan, "d", "e", np.nan, "g"], + "B": [1, 2, 3, 4, 5, 6, 7], + "C": ["one", "two", "three", np.nan, "five", np.nan, "seven"], + } + ), + ), ], ) def test_na_values_keep_default(all_parsers, kwargs, expected):
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

https://github.com/pandas-dev/pandas/issues/52493

As is already known, pd.read_csv(io.StringIO("a\nNone")).a[0] is 'None' on pandas 1 but NaN on pandas 2. Pandas uses NaN to represent missing values, but "None" often means 'no option satisfies the condition' in real data. Even though this is an unintentional breaking change, it is considered too late to fix this bug outright now. I ran into this problem while updating the libraries we use.

So, I suggest adding a parameter, 'remove_from_default_na', as in `read_excel(path, remove_from_default_na="None")`, which is also a straightforward solution to the issue below.

https://github.com/pandas-dev/pandas/issues/19156

Without this parameter, the workaround would be something like

> from pandas._libs.parsers import STR_NA_VALUES
> read_excel(path, na_values=STR_NA_VALUES-set(["None"]), keep_default_na=False)

Thank you for the great library.
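A side-by-side sketch of the proposed keyword (only valid with this patch applied) and the current workaround described above:

```python
import io
import pandas as pd
from pandas._libs.parsers import STR_NA_VALUES

# Proposed in this PR: keep "None" as a string while retaining the other
# default NA markers (keyword exists only with this patch applied).
data = io.StringIO("a\nNone")
pd.read_csv(data, remove_from_default_na=["None"]).a[0]  # 'None', not NaN

# Workaround on current pandas: rebuild the default NA set by hand.
data = io.StringIO("a\nNone")
pd.read_csv(data, na_values=STR_NA_VALUES - {"None"}, keep_default_na=False).a[0]
```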
https://api.github.com/repos/pandas-dev/pandas/pulls/55280
2023-09-25T12:36:43Z
2023-09-25T18:19:40Z
null
2023-09-26T00:32:39Z
BUG: df.resample('MS', closed='right') incorrectly places bins
diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx index cca379c620aeb..48d70d8b2c0b0 100644 --- a/pandas/_libs/tslibs/dtypes.pyx +++ b/pandas/_libs/tslibs/dtypes.pyx @@ -189,13 +189,8 @@ OFFSET_TO_PERIOD_FREQSTR: dict = { "WEEKDAY": "D", "EOM": "M", "BM": "M", - "BQS": "Q", - "QS": "Q", "BQ": "Q", "BA": "A", - "AS": "A", - "BAS": "A", - "MS": "M", "D": "D", "B": "B", "min": "min", @@ -210,18 +205,16 @@ OFFSET_TO_PERIOD_FREQSTR: dict = { "ME": "M", "Y": "A", "BY": "A", - "YS": "A", - "BYS": "A", } cdef dict c_OFFSET_TO_PERIOD_FREQSTR = OFFSET_TO_PERIOD_FREQSTR cpdef freq_to_period_freqstr(freq_n, freq_name): if freq_n == 1: freqstr = f"""{c_OFFSET_TO_PERIOD_FREQSTR.get( - freq_name, freq_name)}""" + freq_name, None)}""" else: freqstr = f"""{freq_n}{c_OFFSET_TO_PERIOD_FREQSTR.get( - freq_name, freq_name)}""" + freq_name, None)}""" return freqstr # Map deprecated resolution abbreviations to correct resolution abbreviations diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index d929dfa6e1e59..9be27edf50cf3 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -2051,3 +2051,16 @@ def test_resample_M_deprecated(): with tm.assert_produces_warning(UserWarning, match=depr_msg): result = s.resample("2M").mean() tm.assert_series_equal(result, expected) + + +def test_resample_ms_closed_right(): + # https://github.com/pandas-dev/pandas/issues/55271 + dti = date_range(start="2020-01-31", freq="1min", periods=6000) + df = DataFrame({"ts": dti}, index=dti) + grouped = df.resample("MS", closed="right") + result = grouped.last() + expected = DataFrame( + {"ts": [datetime(2020, 2, 1), datetime(2020, 2, 4, 3, 59)]}, + index=DatetimeIndex([datetime(2020, 1, 1), datetime(2020, 2, 1)], freq="MS"), + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 7aa245341cbdd..471904cc5b60b 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -59,13 +59,6 @@ # --------------------------------------------------------------------- # Offset related functions -_need_suffix = ["QS", "BQ", "BQS", "YS", "AS", "BY", "BA", "BYS", "BAS"] - -for _prefix in _need_suffix: - for _m in MONTHS: - key = f"{_prefix}-{_m}" - OFFSET_TO_PERIOD_FREQSTR[key] = OFFSET_TO_PERIOD_FREQSTR[_prefix] - for _prefix in ["A", "Q"]: for _m in MONTHS: _alias = f"{_prefix}-{_m}" @@ -502,10 +495,10 @@ def is_superperiod(source, target) -> bool: ------- bool """ - if target is None or source is None: - return False source = _maybe_coerce_freq(source) target = _maybe_coerce_freq(target) + if target is None or source is None: + return False if _is_annual(source): if _is_annual(target): @@ -544,7 +537,7 @@ def is_superperiod(source, target) -> bool: return False -def _maybe_coerce_freq(code) -> str: +def _maybe_coerce_freq(code) -> str | None: """we might need to coerce a code to a rule_code and uppercase it @@ -557,9 +550,10 @@ def _maybe_coerce_freq(code) -> str: ------- str """ - assert code is not None if isinstance(code, DateOffset): code = freq_to_period_freqstr(1, code.name) + if code is None: + return None if code in {"min", "s", "ms", "us", "ns"}: return code else:
- [ ] closes #55271 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55278
2023-09-25T10:12:18Z
2023-09-25T10:34:46Z
null
2023-09-25T10:34:46Z
Updated `pre-commit` hooks versions.
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c01bf65818167..b0b511e1048c6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,11 +20,11 @@ ci: repos: - repo: https://github.com/hauntsaninja/black-pre-commit-mirror # black compiled with mypyc - rev: 23.7.0 + rev: 23.9.1 hooks: - id: black - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.287 + rev: v0.0.291 hooks: - id: ruff args: [--exit-non-zero-on-fix] @@ -107,7 +107,7 @@ repos: hooks: - id: isort - repo: https://github.com/asottile/pyupgrade - rev: v3.10.1 + rev: v3.13.0 hooks: - id: pyupgrade args: [--py39-plus] diff --git a/pyproject.toml b/pyproject.toml index 4e1c77413efda..df89bd129901f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -187,7 +187,7 @@ environment = {CFLAGS="-g0"} [tool.black] target-version = ['py39', 'py310'] -required-version = '23.7.0' +required-version = '23.9.1' exclude = ''' ( asv_bench/env
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55277
2023-09-25T09:29:05Z
2023-09-25T18:08:06Z
2023-09-25T18:08:06Z
2023-09-25T18:53:39Z
Bump pypa/cibuildwheel from 2.15.0 to 2.16.0
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 83d14b51092e6..4c7a7b329777b 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -138,7 +138,7 @@ jobs: run: echo "sdist_name=$(cd ./dist && ls -d */)" >> "$GITHUB_ENV" - name: Build wheels - uses: pypa/cibuildwheel@v2.15.0 + uses: pypa/cibuildwheel@v2.16.0 with: package-dir: ./dist/${{ matrix.buildplat[1] == 'macosx_*' && env.sdist_name || needs.build_sdist.outputs.sdist_file }} env:
Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.15.0 to 2.16.0. <details> <summary>Release notes</summary> <p><em>Sourced from <a href="https://github.com/pypa/cibuildwheel/releases">pypa/cibuildwheel's releases</a>.</em></p> <blockquote> <h2>v2.16.0</h2> <ul> <li>✨ Add the ability to pass additional flags to a build frontend through the <a href="https://cibuildwheel.readthedocs.io/en/stable/options/#build-frontend">CIBW_BUILD_FRONTEND</a> option (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1588">#1588</a>).</li> <li>✨ The environment variable SOURCE_DATE_EPOCH is now automatically passed through to container Linux builds (useful for <a href="https://reproducible-builds.org/docs/source-date-epoch/">reproducible builds</a>!) (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1589">#1589</a>)</li> <li>🛠 Updates the prerelease CPython 3.12 version to 3.12.0rc2 (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1604">#1604</a>)</li> <li>🐛 Fix <code>requires_python</code> auto-detection from setup.py when the call to <code>setup()</code> is within an <code>if __name__ == &quot;__main__&quot;</code> block (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1613">#1613</a>)</li> <li>🐛 Fix a bug that prevented building Linux wheels in Docker on a Windows host (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1573">#1573</a>)</li> <li>🐛 <code>--only</code> can now select prerelease-pythons (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1564">#1564</a>)</li> <li>📚 Docs &amp; examples updates (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1582">#1582</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1593">#1593</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1598">#1598</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1615">#1615</a>)</li> </ul> </blockquote> </details> <details> <summary>Changelog</summary> <p><em>Sourced from <a href="https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md">pypa/cibuildwheel's changelog</a>.</em></p> <blockquote> <h3>v2.16.0</h3> <p><em>18 September 2023</em></p> <ul> <li>✨ Add the ability to pass additional flags to a build frontend through the <a href="https://cibuildwheel.readthedocs.io/en/stable/options/#build-frontend">CIBW_BUILD_FRONTEND</a> option (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1588">#1588</a>).</li> <li>✨ The environment variable SOURCE_DATE_EPOCH is now automatically passed through to container Linux builds (useful for <a href="https://reproducible-builds.org/docs/source-date-epoch/">reproducible builds</a>!) 
(<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1589">#1589</a>)</li> <li>🛠 Updates the prerelease CPython 3.12 version to 3.12.0rc2 (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1604">#1604</a>)</li> <li>🐛 Fix <code>requires_python</code> auto-detection from setup.py when the call to <code>setup()</code> is within an `if <strong>name</strong> == &quot;<strong>main</strong>&quot; block (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1613">#1613</a>)</li> <li>🐛 Fix a bug that prevented building Linux wheels in Docker on a Windows host (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1573">#1573</a>)</li> <li>🐛 <code>--only</code> can now select prerelease-pythons (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1564">#1564</a>)</li> <li>📚 Docs &amp; examples updates (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1582">#1582</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1593">#1593</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1598">#1598</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1615">#1615</a>)</li> </ul> </blockquote> </details> <details> <summary>Commits</summary> <ul> <li><a href="https://github.com/pypa/cibuildwheel/commit/a873dd9cbf9e3c4c73a1fd11ac31cf835f6eb502"><code>a873dd9</code></a> Bump version: v2.16.0</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/e8ba0d49edd2845a1a46395921609f1b7a194bbf"><code>e8ba0d4</code></a> Merge pull request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1615">#1615</a> from pypa/dependabot/github_actions/docker/setup-qem...</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/f0feaffbaabd508d48bb83b5d46b83cac7107181"><code>f0feaff</code></a> Merge pull request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1613">#1613</a> from henryiii/henryiii/fix/mainif</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/80a54b0226a8a4cc643bc24a968570871dd84364"><code>80a54b0</code></a> Merge pull request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1589">#1589</a> from dalcinl/source_date_epoch</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/76dba0b9ba3b5143ff833d8414b023ecf2ce8a90"><code>76dba0b</code></a> Merge pull request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1588">#1588</a> from pypa/frontend-flags</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/0954ffaa6586fffcdc720ab2b788ec1abcdf3481"><code>0954ffa</code></a> Merge pull request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1618">#1618</a> from pypa/rtd-update</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/753cbd1ca526543d317f0678fb5bc16018ed5ee9"><code>753cbd1</code></a> Update RtD config to include mandatory build.os option</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/825d89818ab1c7ca0bb9b9781b0b9beb74925b6d"><code>825d898</code></a> Merge pull request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1614">#1614</a> from henryiii/henryiii/chore/minor</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/f5e60d647fe700ba6e357c30376e8a48f91e5974"><code>f5e60d6</code></a> fix: include examples too</li> <li><a href="https://github.com/pypa/cibuildwheel/commit/adc991c47b1cb56b6de3bb052686f8081791b21b"><code>adc991c</code></a> [Bot] Update dependencies (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1604">#1604</a>)</li> <li>Additional commits 
viewable in <a href="https://github.com/pypa/cibuildwheel/compare/v2.15.0...v2.16.0">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pypa/cibuildwheel&package-manager=github_actions&previous-version=2.15.0&new-version=2.16.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
https://api.github.com/repos/pandas-dev/pandas/pulls/55276
2023-09-25T08:51:03Z
2023-09-25T18:08:55Z
2023-09-25T18:08:55Z
2023-09-25T18:08:59Z
Update pyproject.toml - replace `output_formatting` with `output-formatting`
diff --git a/.github/workflows/package-checks.yml b/.github/workflows/package-checks.yml index 64a94d7fde5a9..a2c42af53c3a8 100644 --- a/.github/workflows/package-checks.yml +++ b/.github/workflows/package-checks.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - extra: ["test", "performance", "computation", "fss", "aws", "gcp", "excel", "parquet", "feather", "hdf5", "spss", "postgresql", "mysql", "sql-other", "html", "xml", "plot", "output_formatting", "clipboard", "compression", "consortium-standard", "all"] + extra: ["test", "performance", "computation", "fss", "aws", "gcp", "excel", "parquet", "feather", "hdf5", "spss", "postgresql", "mysql", "sql-other", "html", "xml", "plot", "output-formatting", "clipboard", "compression", "consortium-standard", "all"] fail-fast: false name: Install Extras - ${{ matrix.extra }} concurrency: diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 2c0787397e047..4a0ad4ae658c3 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -247,14 +247,14 @@ Dependency Minimum Version pip ext Visualization ^^^^^^^^^^^^^ -Installable with ``pip install "pandas[plot, output_formatting]"``. +Installable with ``pip install "pandas[plot, output-formatting]"``. ========================= ================== ================== ============================================================= Dependency Minimum Version pip extra Notes ========================= ================== ================== ============================================================= matplotlib 3.6.1 plot Plotting library -Jinja2 3.1.2 output_formatting Conditional formatting with DataFrame.style -tabulate 0.8.10 output_formatting Printing in Markdown-friendly format (see `tabulate`_) +Jinja2 3.1.2 output-formatting Conditional formatting with DataFrame.style +tabulate 0.8.10 output-formatting Printing in Markdown-friendly format (see `tabulate`_) ========================= ================== ================== ============================================================= Computation diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 97aeb56924e65..6fec66ec8d556 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -29,7 +29,7 @@ Bug fixes Other ~~~~~ -- +- Fixed non-working installation of optional dependency group ``output_formatting``. Replacing underscore ``_`` with a dash ``-`` fixes broken dependency resolution. A correct way to use now is ``pip install pandas[output-formatting]``. - .. --------------------------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index 4e1c77413efda..f0d9dbcdf8c61 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -80,7 +80,7 @@ sql-other = ['SQLAlchemy>=1.4.36'] html = ['beautifulsoup4>=4.11.1', 'html5lib>=1.1', 'lxml>=4.8.0'] xml = ['lxml>=4.8.0'] plot = ['matplotlib>=3.6.1'] -output_formatting = ['jinja2>=3.1.2', 'tabulate>=0.8.10'] +output-formatting = ['jinja2>=3.1.2', 'tabulate>=0.8.10'] clipboard = ['PyQt5>=5.15.6', 'qtpy>=2.2.0'] compression = ['zstandard>=0.17.0'] consortium-standard = ['dataframe-api-compat>=0.1.7']
closes #55290 Simply put, none of these would work (the dependencies from `output-formatting` would not be installed): ``` pip install pandas[output-formatting] pip install pandas[output_formatting] pip install pandas[output-formatting,xml] ``` When the package is distributed, underscores are replaced with dashes. So, in the packaged PKG-INFO file you won't find any occurrence of `output_formatting`, only `output-formatting`: ``` Requires-Dist: lxml>=4.8.0; extra == "xml" Requires-Dist: jinja2>=3.1.2; extra == "output-formatting" Requires-Dist: tabulate>=0.8.10; extra == "output-formatting" ``` Yet at the same time the metadata declares: ``` Provides-Extra: output_formatting ``` This fix addresses that mismatch.
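For context, a small sketch of the normalization that makes the underscore spelling unreliable, assuming the third-party `packaging` library is installed (this snippet is illustrative and not part of the patch):

```python
from packaging.utils import canonicalize_name

# PEP 685 normalizes extra names with the same rules as project names,
# so the dash form is canonical. Metadata that declares
# "Provides-Extra: output_formatting" while its Requires-Dist markers
# use extra == "output-formatting" can therefore fail to resolve.
for extra in ("output_formatting", "output-formatting"):
    print(extra, "->", canonicalize_name(extra))
# both print: ... -> output-formatting
```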
https://api.github.com/repos/pandas-dev/pandas/pulls/55275
2023-09-25T08:28:45Z
2023-09-26T00:11:06Z
2023-09-26T00:11:06Z
2023-09-26T00:11:16Z
ASV: add GroupBy.sum benchmark with timedelta and integer types
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index b206523dfe851..4567b5b414301 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -841,6 +841,23 @@ def time_groupby_sum_multiindex(self): self.df.groupby(level=[0, 1]).sum() +class SumTimeDelta: + # GH 20660 + def setup(self): + N = 10**4 + self.df = DataFrame( + np.random.randint(1000, 100000, (N, 100)), + index=np.random.randint(200, size=(N,)), + ).astype("timedelta64[ns]") + self.df_int = self.df.copy().astype("int64") + + def time_groupby_sum_timedelta(self): + self.df.groupby(lambda x: x).sum() + + def time_groupby_sum_int(self): + self.df_int.groupby(lambda x: x).sum() + + class Transform: def setup(self): n1 = 400
- [x] closes #20660 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests). - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added a new benchmark for the `GroupBy.sum` method with the `timedelta64` and `int64` dtypes. A sketch of the operation being timed is shown below.
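A sketch mirroring the `SumTimeDelta` setup in the diff, showing the operation the benchmark times (shapes and seedless RNG calls copied from the benchmark; not an exact replica of the ASV harness):

```python
import numpy as np
import pandas as pd

# Same shape as the benchmark: a 10_000 x 100 frame of timedeltas with
# roughly 200 distinct index labels, grouped and summed.
N = 10_000
df = pd.DataFrame(
    np.random.randint(1000, 100_000, (N, 100)),
    index=np.random.randint(200, size=(N,)),
).astype("timedelta64[ns]")

df.groupby(lambda x: x).sum()                  # timedelta path (GH#20660)
df.astype("int64").groupby(lambda x: x).sum()  # integer baseline
```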
https://api.github.com/repos/pandas-dev/pandas/pulls/55273
2023-09-25T03:40:18Z
2023-09-25T18:20:52Z
2023-09-25T18:20:52Z
2023-09-28T20:03:00Z
BUG: preserve datetime values in DataFrame.melt
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index fa3cef6d9457d..d498c84358448 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -254,6 +254,7 @@ Bug fixes - Bug in :class:`AbstractHolidayCalendar` where timezone data was not propagated when computing holiday observances (:issue:`54580`) - Bug in :class:`pandas.core.window.Rolling` where duplicate datetimelike indexes are treated as consecutive rather than equal with ``closed='left'`` and ``closed='neither'`` (:issue:`20712`) - Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`) +- Bug in :meth:`pandas.DataFrame.melt` where it would not preserve the datetime (:issue:`55254`) - Bug in :meth:`pandas.read_excel` with a ODS file without cached formatted cell for float values (:issue:`55219`) Categorical diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 74e6a6a28ccb0..387d43f47fe9b 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -134,7 +134,9 @@ def melt( mcolumns = id_vars + var_name + [value_name] - if frame.shape[1] > 0: + if frame.shape[1] > 0 and not any( + not isinstance(dt, np.dtype) and dt._supports_2d for dt in frame.dtypes + ): mdata[value_name] = concat( [frame.iloc[:, i] for i in range(frame.shape[1])] ).values diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 941478066a7d8..ef748e264188c 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -459,6 +459,47 @@ def test_melt_ea_columns(self): ) tm.assert_frame_equal(result, expected) + def test_melt_preserves_datetime(self): + df = DataFrame( + data=[ + { + "type": "A0", + "start_date": pd.Timestamp("2023/03/01", tz="Asia/Tokyo"), + "end_date": pd.Timestamp("2023/03/10", tz="Asia/Tokyo"), + }, + { + "type": "A1", + "start_date": pd.Timestamp("2023/03/01", tz="Asia/Tokyo"), + "end_date": pd.Timestamp("2023/03/11", tz="Asia/Tokyo"), + }, + ], + index=["aaaa", "bbbb"], + ) + result = df.melt( + id_vars=["type"], + value_vars=["start_date", "end_date"], + var_name="start/end", + value_name="date", + ) + expected = DataFrame( + { + "type": {0: "A0", 1: "A1", 2: "A0", 3: "A1"}, + "start/end": { + 0: "start_date", + 1: "start_date", + 2: "end_date", + 3: "end_date", + }, + "date": { + 0: pd.Timestamp("2023-03-01 00:00:00+0900", tz="Asia/Tokyo"), + 1: pd.Timestamp("2023-03-01 00:00:00+0900", tz="Asia/Tokyo"), + 2: pd.Timestamp("2023-03-10 00:00:00+0900", tz="Asia/Tokyo"), + 3: pd.Timestamp("2023-03-11 00:00:00+0900", tz="Asia/Tokyo"), + }, + } + ) + tm.assert_frame_equal(result, expected) + class TestLreshape: def test_pairs(self):
- [x] closes #55254 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
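A condensed version of the behaviour the new test pins down, assuming this patch is applied (data adapted from `test_melt_preserves_datetime` in the diff):

```python
import pandas as pd

df = pd.DataFrame(
    {
        "type": ["A0", "A1"],
        "start_date": pd.to_datetime(
            ["2023-03-01", "2023-03-01"]
        ).tz_localize("Asia/Tokyo"),
        "end_date": pd.to_datetime(
            ["2023-03-10", "2023-03-11"]
        ).tz_localize("Asia/Tokyo"),
    }
)

melted = df.melt(
    id_vars=["type"],
    value_vars=["start_date", "end_date"],
    var_name="start/end",
    value_name="date",
)
# With the fix, the value column keeps its tz-aware datetime dtype
# instead of being flattened through a plain ndarray:
print(melted["date"].dtype)  # datetime64[ns, Asia/Tokyo]
```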
https://api.github.com/repos/pandas-dev/pandas/pulls/55270
2023-09-25T00:44:14Z
2023-10-03T21:56:40Z
2023-10-03T21:56:40Z
2023-10-03T21:56:47Z
TST: Clean up autouse fixtures
diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py index 7dd5169202ba4..7ec77e5b65b7e 100644 --- a/pandas/tests/arithmetic/conftest.py +++ b/pandas/tests/arithmetic/conftest.py @@ -7,23 +7,8 @@ RangeIndex, ) import pandas._testing as tm -from pandas.core.computation import expressions as expr -@pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) -def switch_numexpr_min_elements(request): - _MIN_ELEMENTS = expr._MIN_ELEMENTS - expr._MIN_ELEMENTS = request.param - yield request.param - expr._MIN_ELEMENTS = _MIN_ELEMENTS - - -# ------------------------------------------------------------------ - - -# doctest with +SKIP for one fixture fails during setup with -# 'DoctestItem' object has no attribute 'callspec' -# due to switch_numexpr_min_elements fixture @pytest.fixture(params=[1, np.array(1, dtype=np.int64)]) def one(request): """ @@ -58,9 +43,6 @@ def one(request): zeros.extend([0, 0.0, -0.0]) -# doctest with +SKIP for zero fixture fails during setup with -# 'DoctestItem' object has no attribute 'callspec' -# due to switch_numexpr_min_elements fixture @pytest.fixture(params=zeros) def zero(request): """ diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index fa17c24fffb26..c93a03ad0f479 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -29,6 +29,13 @@ ) +@pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) +def switch_numexpr_min_elements(request, monkeypatch): + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", request.param) + yield request.param + + @pytest.fixture(params=[Index, Series, tm.to_array]) def box_pandas_1d_array(request): """ diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 1488fa65fabc0..24a70e55e2f0e 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -33,11 +33,10 @@ @pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) -def switch_numexpr_min_elements(request): - _MIN_ELEMENTS = expr._MIN_ELEMENTS - expr._MIN_ELEMENTS = request.param - yield request.param - expr._MIN_ELEMENTS = _MIN_ELEMENTS +def switch_numexpr_min_elements(request, monkeypatch): + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", request.param) + yield request.param class DummyElement: @@ -1074,7 +1073,7 @@ def test_frame_with_frame_reindex(self): ], ids=lambda x: x.__name__, ) - def test_binop_other(self, op, value, dtype, switch_numexpr_min_elements, request): + def test_binop_other(self, op, value, dtype, switch_numexpr_min_elements): skip = { (operator.truediv, "bool"), (operator.pow, "bool"), diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index f3a1ebe23b568..53ee449c2dc0c 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -22,8 +22,6 @@ import pytest import pytz -from pandas._config import config - import pandas as pd from pandas import ( DataFrame, @@ -51,17 +49,6 @@ def get_local_am_pm(): return am_local, pm_local -@pytest.fixture(autouse=True) -def clean_config(): - curr_deprecated_options = config._deprecated_options.copy() - curr_registered_options = config._registered_options.copy() - curr_global_config = config._global_config.copy() - yield - config._deprecated_options = curr_deprecated_options - config._registered_options = curr_registered_options - 
config._global_config = curr_global_config - - @pytest.fixture(params=["string", "pathlike", "buffer"]) def filepath_or_buffer_id(request): """ @@ -3604,7 +3591,7 @@ def test_repr_html_ipython_config(ip): df._repr_html_() """ ) - result = ip.run_cell(code) + result = ip.run_cell(code, silent=True) assert not result.error_in_exec diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 6cf90749e5b30..dcee52011a691 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -99,15 +99,18 @@ def test_same_ordering(datapath): assert_framelist_equal(dfs_lxml, dfs_bs4) -@pytest.mark.parametrize( - "flavor", - [ +@pytest.fixture( + params=[ pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]), pytest.param("lxml", marks=td.skip_if_no("lxml")), ], ) +def flavor_read_html(request): + return partial(read_html, flavor=request.param) + + class TestReadHtml: - def test_literal_html_deprecation(self): + def test_literal_html_deprecation(self, flavor_read_html): # GH 53785 msg = ( "Passing literal html to 'read_html' is deprecated and " @@ -116,7 +119,7 @@ def test_literal_html_deprecation(self): ) with tm.assert_produces_warning(FutureWarning, match=msg): - self.read_html( + flavor_read_html( """<table> <thead> <tr> @@ -147,12 +150,7 @@ def spam_data(self, datapath): def banklist_data(self, datapath): return datapath("io", "data", "html", "banklist.html") - @pytest.fixture(autouse=True) - def set_defaults(self, flavor): - self.read_html = partial(read_html, flavor=flavor) - yield - - def test_to_html_compat(self): + def test_to_html_compat(self, flavor_read_html): df = ( tm.makeCustomDataframe( 4, @@ -165,12 +163,12 @@ def test_to_html_compat(self): .map("{:.3f}".format).astype(float) ) out = df.to_html() - res = self.read_html(StringIO(out), attrs={"class": "dataframe"}, index_col=0)[ - 0 - ] + res = flavor_read_html( + StringIO(out), attrs={"class": "dataframe"}, index_col=0 + )[0] tm.assert_frame_equal(res, df) - def test_dtype_backend(self, string_storage, dtype_backend): + def test_dtype_backend(self, string_storage, dtype_backend, flavor_read_html): # GH#50286 df = DataFrame( { @@ -196,7 +194,7 @@ def test_dtype_backend(self, string_storage, dtype_backend): out = df.to_html(index=False) with pd.option_context("mode.string_storage", string_storage): - result = self.read_html(StringIO(out), dtype_backend=dtype_backend)[0] + result = flavor_read_html(StringIO(out), dtype_backend=dtype_backend)[0] expected = DataFrame( { @@ -227,16 +225,16 @@ def test_dtype_backend(self, string_storage, dtype_backend): @pytest.mark.network @pytest.mark.single_cpu - def test_banklist_url(self, httpserver, banklist_data): + def test_banklist_url(self, httpserver, banklist_data, flavor_read_html): with open(banklist_data, encoding="utf-8") as f: httpserver.serve_content(content=f.read()) - df1 = self.read_html( + df1 = flavor_read_html( # lxml cannot find attrs leave out for now httpserver.url, match="First Federal Bank of Florida", # attrs={"class": "dataTable"} ) # lxml cannot find attrs leave out for now - df2 = self.read_html( + df2 = flavor_read_html( httpserver.url, match="Metcalf Bank", ) # attrs={"class": "dataTable"}) @@ -245,165 +243,169 @@ def test_banklist_url(self, httpserver, banklist_data): @pytest.mark.network @pytest.mark.single_cpu - def test_spam_url(self, httpserver, spam_data): + def test_spam_url(self, httpserver, spam_data, flavor_read_html): with open(spam_data, encoding="utf-8") as f: httpserver.serve_content(content=f.read()) - 
df1 = self.read_html(httpserver.url, match=".*Water.*") - df2 = self.read_html(httpserver.url, match="Unit") + df1 = flavor_read_html(httpserver.url, match=".*Water.*") + df2 = flavor_read_html(httpserver.url, match="Unit") assert_framelist_equal(df1, df2) @pytest.mark.slow - def test_banklist(self, banklist_data): - df1 = self.read_html(banklist_data, match=".*Florida.*", attrs={"id": "table"}) - df2 = self.read_html(banklist_data, match="Metcalf Bank", attrs={"id": "table"}) + def test_banklist(self, banklist_data, flavor_read_html): + df1 = flavor_read_html( + banklist_data, match=".*Florida.*", attrs={"id": "table"} + ) + df2 = flavor_read_html( + banklist_data, match="Metcalf Bank", attrs={"id": "table"} + ) assert_framelist_equal(df1, df2) - def test_spam(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*") - df2 = self.read_html(spam_data, match="Unit") + def test_spam(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*") + df2 = flavor_read_html(spam_data, match="Unit") assert_framelist_equal(df1, df2) assert df1[0].iloc[0, 0] == "Proximates" assert df1[0].columns[0] == "Nutrient" - def test_spam_no_match(self, spam_data): - dfs = self.read_html(spam_data) + def test_spam_no_match(self, spam_data, flavor_read_html): + dfs = flavor_read_html(spam_data) for df in dfs: assert isinstance(df, DataFrame) - def test_banklist_no_match(self, banklist_data): - dfs = self.read_html(banklist_data, attrs={"id": "table"}) + def test_banklist_no_match(self, banklist_data, flavor_read_html): + dfs = flavor_read_html(banklist_data, attrs={"id": "table"}) for df in dfs: assert isinstance(df, DataFrame) - def test_spam_header(self, spam_data): - df = self.read_html(spam_data, match=".*Water.*", header=2)[0] + def test_spam_header(self, spam_data, flavor_read_html): + df = flavor_read_html(spam_data, match=".*Water.*", header=2)[0] assert df.columns[0] == "Proximates" assert not df.empty - def test_skiprows_int(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*", skiprows=1) - df2 = self.read_html(spam_data, match="Unit", skiprows=1) + def test_skiprows_int(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=1) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=1) assert_framelist_equal(df1, df2) - def test_skiprows_range(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*", skiprows=range(2)) - df2 = self.read_html(spam_data, match="Unit", skiprows=range(2)) + def test_skiprows_range(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=range(2)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=range(2)) assert_framelist_equal(df1, df2) - def test_skiprows_list(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*", skiprows=[1, 2]) - df2 = self.read_html(spam_data, match="Unit", skiprows=[2, 1]) + def test_skiprows_list(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=[1, 2]) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=[2, 1]) assert_framelist_equal(df1, df2) - def test_skiprows_set(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*", skiprows={1, 2}) - df2 = self.read_html(spam_data, match="Unit", skiprows={2, 1}) + def test_skiprows_set(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows={1, 2}) + df2 = flavor_read_html(spam_data, match="Unit", 
skiprows={2, 1}) assert_framelist_equal(df1, df2) - def test_skiprows_slice(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*", skiprows=1) - df2 = self.read_html(spam_data, match="Unit", skiprows=1) + def test_skiprows_slice(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=1) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=1) assert_framelist_equal(df1, df2) - def test_skiprows_slice_short(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*", skiprows=slice(2)) - df2 = self.read_html(spam_data, match="Unit", skiprows=slice(2)) + def test_skiprows_slice_short(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=slice(2)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=slice(2)) assert_framelist_equal(df1, df2) - def test_skiprows_slice_long(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*", skiprows=slice(2, 5)) - df2 = self.read_html(spam_data, match="Unit", skiprows=slice(4, 1, -1)) + def test_skiprows_slice_long(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=slice(2, 5)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=slice(4, 1, -1)) assert_framelist_equal(df1, df2) - def test_skiprows_ndarray(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*", skiprows=np.arange(2)) - df2 = self.read_html(spam_data, match="Unit", skiprows=np.arange(2)) + def test_skiprows_ndarray(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=np.arange(2)) + df2 = flavor_read_html(spam_data, match="Unit", skiprows=np.arange(2)) assert_framelist_equal(df1, df2) - def test_skiprows_invalid(self, spam_data): + def test_skiprows_invalid(self, spam_data, flavor_read_html): with pytest.raises(TypeError, match=("is not a valid type for skipping rows")): - self.read_html(spam_data, match=".*Water.*", skiprows="asdf") + flavor_read_html(spam_data, match=".*Water.*", skiprows="asdf") - def test_index(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*", index_col=0) - df2 = self.read_html(spam_data, match="Unit", index_col=0) + def test_index(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", index_col=0) + df2 = flavor_read_html(spam_data, match="Unit", index_col=0) assert_framelist_equal(df1, df2) - def test_header_and_index_no_types(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*", header=1, index_col=0) - df2 = self.read_html(spam_data, match="Unit", header=1, index_col=0) + def test_header_and_index_no_types(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", header=1, index_col=0) + df2 = flavor_read_html(spam_data, match="Unit", header=1, index_col=0) assert_framelist_equal(df1, df2) - def test_header_and_index_with_types(self, spam_data): - df1 = self.read_html(spam_data, match=".*Water.*", header=1, index_col=0) - df2 = self.read_html(spam_data, match="Unit", header=1, index_col=0) + def test_header_and_index_with_types(self, spam_data, flavor_read_html): + df1 = flavor_read_html(spam_data, match=".*Water.*", header=1, index_col=0) + df2 = flavor_read_html(spam_data, match="Unit", header=1, index_col=0) assert_framelist_equal(df1, df2) - def test_infer_types(self, spam_data): + def test_infer_types(self, spam_data, flavor_read_html): # 10892 infer_types removed - df1 = self.read_html(spam_data, 
match=".*Water.*", index_col=0) - df2 = self.read_html(spam_data, match="Unit", index_col=0) + df1 = flavor_read_html(spam_data, match=".*Water.*", index_col=0) + df2 = flavor_read_html(spam_data, match="Unit", index_col=0) assert_framelist_equal(df1, df2) - def test_string_io(self, spam_data): + def test_string_io(self, spam_data, flavor_read_html): with open(spam_data, encoding="UTF-8") as f: data1 = StringIO(f.read()) with open(spam_data, encoding="UTF-8") as f: data2 = StringIO(f.read()) - df1 = self.read_html(data1, match=".*Water.*") - df2 = self.read_html(data2, match="Unit") + df1 = flavor_read_html(data1, match=".*Water.*") + df2 = flavor_read_html(data2, match="Unit") assert_framelist_equal(df1, df2) - def test_string(self, spam_data): + def test_string(self, spam_data, flavor_read_html): with open(spam_data, encoding="UTF-8") as f: data = f.read() - df1 = self.read_html(StringIO(data), match=".*Water.*") - df2 = self.read_html(StringIO(data), match="Unit") + df1 = flavor_read_html(StringIO(data), match=".*Water.*") + df2 = flavor_read_html(StringIO(data), match="Unit") assert_framelist_equal(df1, df2) - def test_file_like(self, spam_data): + def test_file_like(self, spam_data, flavor_read_html): with open(spam_data, encoding="UTF-8") as f: - df1 = self.read_html(f, match=".*Water.*") + df1 = flavor_read_html(f, match=".*Water.*") with open(spam_data, encoding="UTF-8") as f: - df2 = self.read_html(f, match="Unit") + df2 = flavor_read_html(f, match="Unit") assert_framelist_equal(df1, df2) @pytest.mark.network @pytest.mark.single_cpu - def test_bad_url_protocol(self, httpserver): + def test_bad_url_protocol(self, httpserver, flavor_read_html): httpserver.serve_content("urlopen error unknown url type: git", code=404) with pytest.raises(URLError, match="urlopen error unknown url type: git"): - self.read_html("git://github.com", match=".*Water.*") + flavor_read_html("git://github.com", match=".*Water.*") @pytest.mark.slow @pytest.mark.network @pytest.mark.single_cpu - def test_invalid_url(self, httpserver): + def test_invalid_url(self, httpserver, flavor_read_html): httpserver.serve_content("Name or service not known", code=404) with pytest.raises((URLError, ValueError), match="HTTP Error 404: NOT FOUND"): - self.read_html(httpserver.url, match=".*Water.*") + flavor_read_html(httpserver.url, match=".*Water.*") @pytest.mark.slow - def test_file_url(self, banklist_data): + def test_file_url(self, banklist_data, flavor_read_html): url = banklist_data - dfs = self.read_html( + dfs = flavor_read_html( file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"} ) assert isinstance(dfs, list) @@ -411,54 +413,78 @@ def test_file_url(self, banklist_data): assert isinstance(df, DataFrame) @pytest.mark.slow - def test_invalid_table_attrs(self, banklist_data): + def test_invalid_table_attrs(self, banklist_data, flavor_read_html): url = banklist_data with pytest.raises(ValueError, match="No tables found"): - self.read_html( + flavor_read_html( url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"} ) - def _bank_data(self, path, **kwargs): - return self.read_html(path, match="Metcalf", attrs={"id": "table"}, **kwargs) - @pytest.mark.slow - def test_multiindex_header(self, banklist_data): - df = self._bank_data(banklist_data, header=[0, 1])[0] + def test_multiindex_header(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, match="Metcalf", attrs={"id": "table"}, header=[0, 1] + )[0] assert isinstance(df.columns, MultiIndex) 
@pytest.mark.slow - def test_multiindex_index(self, banklist_data): - df = self._bank_data(banklist_data, index_col=[0, 1])[0] + def test_multiindex_index(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, match="Metcalf", attrs={"id": "table"}, index_col=[0, 1] + )[0] assert isinstance(df.index, MultiIndex) @pytest.mark.slow - def test_multiindex_header_index(self, banklist_data): - df = self._bank_data(banklist_data, header=[0, 1], index_col=[0, 1])[0] + def test_multiindex_header_index(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + index_col=[0, 1], + )[0] assert isinstance(df.columns, MultiIndex) assert isinstance(df.index, MultiIndex) @pytest.mark.slow - def test_multiindex_header_skiprows_tuples(self, banklist_data): - df = self._bank_data(banklist_data, header=[0, 1], skiprows=1)[0] + def test_multiindex_header_skiprows_tuples(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + skiprows=1, + )[0] assert isinstance(df.columns, MultiIndex) @pytest.mark.slow - def test_multiindex_header_skiprows(self, banklist_data): - df = self._bank_data(banklist_data, header=[0, 1], skiprows=1)[0] + def test_multiindex_header_skiprows(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + skiprows=1, + )[0] assert isinstance(df.columns, MultiIndex) @pytest.mark.slow - def test_multiindex_header_index_skiprows(self, banklist_data): - df = self._bank_data( - banklist_data, header=[0, 1], index_col=[0, 1], skiprows=1 + def test_multiindex_header_index_skiprows(self, banklist_data, flavor_read_html): + df = flavor_read_html( + banklist_data, + match="Metcalf", + attrs={"id": "table"}, + header=[0, 1], + index_col=[0, 1], + skiprows=1, )[0] assert isinstance(df.index, MultiIndex) assert isinstance(df.columns, MultiIndex) @pytest.mark.slow - def test_regex_idempotency(self, banklist_data): + def test_regex_idempotency(self, banklist_data, flavor_read_html): url = banklist_data - dfs = self.read_html( + dfs = flavor_read_html( file_path_to_url(os.path.abspath(url)), match=re.compile(re.compile("Florida")), attrs={"id": "table"}, @@ -467,10 +493,10 @@ def test_regex_idempotency(self, banklist_data): for df in dfs: assert isinstance(df, DataFrame) - def test_negative_skiprows(self, spam_data): + def test_negative_skiprows(self, spam_data, flavor_read_html): msg = r"\(you passed a negative value\)" with pytest.raises(ValueError, match=msg): - self.read_html(spam_data, match="Water", skiprows=-1) + flavor_read_html(spam_data, match="Water", skiprows=-1) @pytest.fixture def python_docs(self): @@ -523,20 +549,20 @@ def python_docs(self): @pytest.mark.network @pytest.mark.single_cpu - def test_multiple_matches(self, python_docs, httpserver): + def test_multiple_matches(self, python_docs, httpserver, flavor_read_html): httpserver.serve_content(content=python_docs) - dfs = self.read_html(httpserver.url, match="Python") + dfs = flavor_read_html(httpserver.url, match="Python") assert len(dfs) > 1 @pytest.mark.network @pytest.mark.single_cpu - def test_python_docs_table(self, python_docs, httpserver): + def test_python_docs_table(self, python_docs, httpserver, flavor_read_html): httpserver.serve_content(content=python_docs) - dfs = self.read_html(httpserver.url, match="Python") + dfs = 
flavor_read_html(httpserver.url, match="Python") zz = [df.iloc[0, 0][0:4] for df in dfs] assert sorted(zz) == ["Pyth", "What"] - def test_empty_tables(self): + def test_empty_tables(self, flavor_read_html): """ Make sure that read_html ignores empty tables. """ @@ -560,13 +586,13 @@ def test_empty_tables(self): </tbody> </table> """ - result = self.read_html(StringIO(html)) + result = flavor_read_html(StringIO(html)) assert len(result) == 1 - def test_multiple_tbody(self): + def test_multiple_tbody(self, flavor_read_html): # GH-20690 # Read all tbody tags within a single table. - result = self.read_html( + result = flavor_read_html( StringIO( """<table> <thead> @@ -595,12 +621,12 @@ def test_multiple_tbody(self): tm.assert_frame_equal(result, expected) - def test_header_and_one_column(self): + def test_header_and_one_column(self, flavor_read_html): """ Don't fail with bs4 when there is a header and only one column as described in issue #9178 """ - result = self.read_html( + result = flavor_read_html( StringIO( """<table> <thead> @@ -621,11 +647,11 @@ def test_header_and_one_column(self): tm.assert_frame_equal(result, expected) - def test_thead_without_tr(self): + def test_thead_without_tr(self, flavor_read_html): """ Ensure parser adds <tr> within <thead> on malformed HTML. """ - result = self.read_html( + result = flavor_read_html( StringIO( """<table> <thead> @@ -653,7 +679,7 @@ def test_thead_without_tr(self): tm.assert_frame_equal(result, expected) - def test_tfoot_read(self): + def test_tfoot_read(self, flavor_read_html): """ Make sure that read_html reads tfoot, containing td or th. Ignores empty tfoot @@ -685,16 +711,16 @@ def test_tfoot_read(self): data1 = data_template.format(footer="") data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>") - result1 = self.read_html(StringIO(data1))[0] - result2 = self.read_html(StringIO(data2))[0] + result1 = flavor_read_html(StringIO(data1))[0] + result2 = flavor_read_html(StringIO(data2))[0] tm.assert_frame_equal(result1, expected1) tm.assert_frame_equal(result2, expected2) - def test_parse_header_of_non_string_column(self): + def test_parse_header_of_non_string_column(self, flavor_read_html): # GH5048: if header is specified explicitly, an int column should be # parsed as int while its header is parsed as str - result = self.read_html( + result = flavor_read_html( StringIO( """ <table> @@ -717,7 +743,7 @@ def test_parse_header_of_non_string_column(self): tm.assert_frame_equal(result, expected) @pytest.mark.slow - def test_banklist_header(self, banklist_data, datapath): + def test_banklist_header(self, banklist_data, datapath, flavor_read_html): from pandas.io.html import _remove_whitespace def try_remove_ws(x): @@ -726,7 +752,7 @@ def try_remove_ws(x): except AttributeError: return x - df = self.read_html(banklist_data, match="Metcalf", attrs={"id": "table"})[0] + df = flavor_read_html(banklist_data, match="Metcalf", attrs={"id": "table"})[0] ground_truth = read_csv( datapath("io", "data", "csv", "banklist.csv"), converters={"Updated Date": Timestamp, "Closing Date": Timestamp}, @@ -765,19 +791,19 @@ def try_remove_ws(x): tm.assert_frame_equal(converted, gtnew) @pytest.mark.slow - def test_gold_canyon(self, banklist_data): + def test_gold_canyon(self, banklist_data, flavor_read_html): gc = "Gold Canyon" with open(banklist_data, encoding="utf-8") as f: raw_text = f.read() assert gc in raw_text - df = self.read_html(banklist_data, match="Gold Canyon", attrs={"id": "table"})[ - 0 - ] + df = flavor_read_html( + 
banklist_data, match="Gold Canyon", attrs={"id": "table"} + )[0] assert gc in df.to_string() - def test_different_number_of_cols(self): - expected = self.read_html( + def test_different_number_of_cols(self, flavor_read_html): + expected = flavor_read_html( StringIO( """<table> <thead> @@ -813,7 +839,7 @@ def test_different_number_of_cols(self): index_col=0, )[0] - result = self.read_html( + result = flavor_read_html( StringIO( """<table> <thead> @@ -848,9 +874,9 @@ def test_different_number_of_cols(self): tm.assert_frame_equal(result, expected) - def test_colspan_rowspan_1(self): + def test_colspan_rowspan_1(self, flavor_read_html): # GH17054 - result = self.read_html( + result = flavor_read_html( StringIO( """ <table> @@ -873,7 +899,7 @@ def test_colspan_rowspan_1(self): tm.assert_frame_equal(result, expected) - def test_colspan_rowspan_copy_values(self): + def test_colspan_rowspan_copy_values(self, flavor_read_html): # GH17054 # In ASCII, with lowercase letters being copies: @@ -881,7 +907,7 @@ def test_colspan_rowspan_copy_values(self): # X x Y Z W # A B b z C - result = self.read_html( + result = flavor_read_html( StringIO( """ <table> @@ -908,7 +934,7 @@ def test_colspan_rowspan_copy_values(self): tm.assert_frame_equal(result, expected) - def test_colspan_rowspan_both_not_1(self): + def test_colspan_rowspan_both_not_1(self, flavor_read_html): # GH17054 # In ASCII, with lowercase letters being copies: @@ -916,7 +942,7 @@ def test_colspan_rowspan_both_not_1(self): # A B b b C # a b b b D - result = self.read_html( + result = flavor_read_html( StringIO( """ <table> @@ -940,7 +966,7 @@ def test_colspan_rowspan_both_not_1(self): tm.assert_frame_equal(result, expected) - def test_rowspan_at_end_of_row(self): + def test_rowspan_at_end_of_row(self, flavor_read_html): # GH17054 # In ASCII, with lowercase letters being copies: @@ -948,7 +974,7 @@ def test_rowspan_at_end_of_row(self): # A B # C b - result = self.read_html( + result = flavor_read_html( StringIO( """ <table> @@ -969,10 +995,10 @@ def test_rowspan_at_end_of_row(self): tm.assert_frame_equal(result, expected) - def test_rowspan_only_rows(self): + def test_rowspan_only_rows(self, flavor_read_html): # GH17054 - result = self.read_html( + result = flavor_read_html( StringIO( """ <table> @@ -990,9 +1016,9 @@ def test_rowspan_only_rows(self): tm.assert_frame_equal(result, expected) - def test_header_inferred_from_rows_with_only_th(self): + def test_header_inferred_from_rows_with_only_th(self, flavor_read_html): # GH17054 - result = self.read_html( + result = flavor_read_html( StringIO( """ <table> @@ -1018,15 +1044,15 @@ def test_header_inferred_from_rows_with_only_th(self): tm.assert_frame_equal(result, expected) - def test_parse_dates_list(self): + def test_parse_dates_list(self, flavor_read_html): df = DataFrame({"date": date_range("1/1/2001", periods=10)}) expected = df.to_html() - res = self.read_html(StringIO(expected), parse_dates=[1], index_col=0) + res = flavor_read_html(StringIO(expected), parse_dates=[1], index_col=0) tm.assert_frame_equal(df, res[0]) - res = self.read_html(StringIO(expected), parse_dates=["date"], index_col=0) + res = flavor_read_html(StringIO(expected), parse_dates=["date"], index_col=0) tm.assert_frame_equal(df, res[0]) - def test_parse_dates_combine(self): + def test_parse_dates_combine(self, flavor_read_html): raw_dates = Series(date_range("1/1/2001", periods=10)) df = DataFrame( { @@ -1034,32 +1060,32 @@ def test_parse_dates_combine(self): "time": raw_dates.map(lambda x: str(x.time())), } ) - res = 
self.read_html( + res = flavor_read_html( StringIO(df.to_html()), parse_dates={"datetime": [1, 2]}, index_col=1 ) newdf = DataFrame({"datetime": raw_dates}) tm.assert_frame_equal(newdf, res[0]) - def test_wikipedia_states_table(self, datapath): + def test_wikipedia_states_table(self, datapath, flavor_read_html): data = datapath("io", "data", "html", "wikipedia_states.html") assert os.path.isfile(data), f"{repr(data)} is not a file" assert os.path.getsize(data), f"{repr(data)} is an empty file" - result = self.read_html(data, match="Arizona", header=1)[0] + result = flavor_read_html(data, match="Arizona", header=1)[0] assert result.shape == (60, 12) assert "Unnamed" in result.columns[-1] assert result["sq mi"].dtype == np.dtype("float64") assert np.allclose(result.loc[0, "sq mi"], 665384.04) - def test_wikipedia_states_multiindex(self, datapath): + def test_wikipedia_states_multiindex(self, datapath, flavor_read_html): data = datapath("io", "data", "html", "wikipedia_states.html") - result = self.read_html(data, match="Arizona", index_col=0)[0] + result = flavor_read_html(data, match="Arizona", index_col=0)[0] assert result.shape == (60, 11) assert "Unnamed" in result.columns[-1][1] assert result.columns.nlevels == 2 assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04) - def test_parser_error_on_empty_header_row(self): - result = self.read_html( + def test_parser_error_on_empty_header_row(self, flavor_read_html): + result = flavor_read_html( StringIO( """ <table> @@ -1083,9 +1109,9 @@ def test_parser_error_on_empty_header_row(self): ) tm.assert_frame_equal(result[0], expected) - def test_decimal_rows(self): + def test_decimal_rows(self, flavor_read_html): # GH 12907 - result = self.read_html( + result = flavor_read_html( StringIO( """<html> <body> @@ -1113,7 +1139,7 @@ def test_decimal_rows(self): tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("arg", [True, False]) - def test_bool_header_arg(self, spam_data, arg): + def test_bool_header_arg(self, spam_data, arg, flavor_read_html): # GH 6114 msg = re.escape( "Passing a bool to header is invalid. 
Use header=None for no header or " @@ -1121,11 +1147,11 @@ def test_bool_header_arg(self, spam_data, arg): "column names" ) with pytest.raises(TypeError, match=msg): - self.read_html(spam_data, header=arg) + flavor_read_html(spam_data, header=arg) - def test_converters(self): + def test_converters(self, flavor_read_html): # GH 13461 - result = self.read_html( + result = flavor_read_html( StringIO( """<table> <thead> @@ -1150,9 +1176,9 @@ def test_converters(self): tm.assert_frame_equal(result, expected) - def test_na_values(self): + def test_na_values(self, flavor_read_html): # GH 13461 - result = self.read_html( + result = flavor_read_html( StringIO( """<table> <thead> @@ -1177,7 +1203,7 @@ def test_na_values(self): tm.assert_frame_equal(result, expected) - def test_keep_default_na(self): + def test_keep_default_na(self, flavor_read_html): html_data = """<table> <thead> <tr> @@ -1195,15 +1221,15 @@ def test_keep_default_na(self): </table>""" expected_df = DataFrame({"a": ["N/A", "NA"]}) - html_df = self.read_html(StringIO(html_data), keep_default_na=False)[0] + html_df = flavor_read_html(StringIO(html_data), keep_default_na=False)[0] tm.assert_frame_equal(expected_df, html_df) expected_df = DataFrame({"a": [np.nan, np.nan]}) - html_df = self.read_html(StringIO(html_data), keep_default_na=True)[0] + html_df = flavor_read_html(StringIO(html_data), keep_default_na=True)[0] tm.assert_frame_equal(expected_df, html_df) - def test_preserve_empty_rows(self): - result = self.read_html( + def test_preserve_empty_rows(self, flavor_read_html): + result = flavor_read_html( StringIO( """ <table> @@ -1228,8 +1254,8 @@ def test_preserve_empty_rows(self): tm.assert_frame_equal(result, expected) - def test_ignore_empty_rows_when_inferring_header(self): - result = self.read_html( + def test_ignore_empty_rows_when_inferring_header(self, flavor_read_html): + result = flavor_read_html( StringIO( """ <table> @@ -1251,7 +1277,7 @@ def test_ignore_empty_rows_when_inferring_header(self): tm.assert_frame_equal(result, expected) - def test_multiple_header_rows(self): + def test_multiple_header_rows(self, flavor_read_html): # Issue #13434 expected_df = DataFrame( data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")] @@ -1261,20 +1287,20 @@ def test_multiple_header_rows(self): ["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"], ] html = expected_df.to_html(index=False) - html_df = self.read_html(StringIO(html))[0] + html_df = flavor_read_html(StringIO(html))[0] tm.assert_frame_equal(expected_df, html_df) - def test_works_on_valid_markup(self, datapath): + def test_works_on_valid_markup(self, datapath, flavor_read_html): filename = datapath("io", "data", "html", "valid_markup.html") - dfs = self.read_html(filename, index_col=0) + dfs = flavor_read_html(filename, index_col=0) assert isinstance(dfs, list) assert isinstance(dfs[0], DataFrame) @pytest.mark.slow - def test_fallback_success(self, datapath): + def test_fallback_success(self, datapath, flavor_read_html): banklist_data = datapath("io", "data", "html", "banklist.html") - self.read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"]) + flavor_read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"]) def test_to_html_timestamp(self): rng = date_range("2000-01-01", periods=10) @@ -1309,7 +1335,7 @@ def test_to_html_borderless(self): (False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])), ], ) - def test_displayed_only(self, displayed_only, exp0, exp1): + def test_displayed_only(self, displayed_only, exp0, exp1, 
flavor_read_html): # GH 20027 data = """<html> <body> @@ -1331,7 +1357,7 @@ def test_displayed_only(self, displayed_only, exp0, exp1): </body> </html>""" - dfs = self.read_html(StringIO(data), displayed_only=displayed_only) + dfs = flavor_read_html(StringIO(data), displayed_only=displayed_only) tm.assert_frame_equal(dfs[0], exp0) if exp1 is not None: @@ -1340,7 +1366,7 @@ def test_displayed_only(self, displayed_only, exp0, exp1): assert len(dfs) == 1 # Should not parse hidden table @pytest.mark.parametrize("displayed_only", [True, False]) - def test_displayed_only_with_many_elements(self, displayed_only): + def test_displayed_only_with_many_elements(self, displayed_only, flavor_read_html): html_table = """ <table> <tr> @@ -1357,7 +1383,9 @@ def test_displayed_only_with_many_elements(self, displayed_only): </tr> </table> """ - result = read_html(StringIO(html_table), displayed_only=displayed_only)[0] + result = flavor_read_html(StringIO(html_table), displayed_only=displayed_only)[ + 0 + ] expected = DataFrame({"A": [1, 4], "B": [2, 5]}) tm.assert_frame_equal(result, expected) @@ -1365,23 +1393,23 @@ def test_displayed_only_with_many_elements(self, displayed_only): "ignore:You provided Unicode markup but also provided a value for " "from_encoding.*:UserWarning" ) - def test_encode(self, html_encoding_file): + def test_encode(self, html_encoding_file, flavor_read_html): base_path = os.path.basename(html_encoding_file) root = os.path.splitext(base_path)[0] _, encoding = root.split("_") try: with open(html_encoding_file, "rb") as fobj: - from_string = self.read_html( + from_string = flavor_read_html( fobj.read(), encoding=encoding, index_col=0 ).pop() with open(html_encoding_file, "rb") as fobj: - from_file_like = self.read_html( + from_file_like = flavor_read_html( BytesIO(fobj.read()), encoding=encoding, index_col=0 ).pop() - from_filename = self.read_html( + from_filename = flavor_read_html( html_encoding_file, encoding=encoding, index_col=0 ).pop() tm.assert_frame_equal(from_string, from_file_like) @@ -1393,10 +1421,10 @@ def test_encode(self, html_encoding_file): pytest.skip() raise - def test_parse_failure_unseekable(self): + def test_parse_failure_unseekable(self, flavor_read_html): # Issue #17975 - if self.read_html.keywords.get("flavor") == "lxml": + if flavor_read_html.keywords.get("flavor") == "lxml": pytest.skip("Not applicable for lxml") class UnseekableStringIO(StringIO): @@ -1408,12 +1436,12 @@ def seekable(self): <table><tr><td>spam<foobr />eggs</td></tr></table>""" ) - assert self.read_html(bad) + assert flavor_read_html(bad) with pytest.raises(ValueError, match="passed a non-rewindable file object"): - self.read_html(bad) + flavor_read_html(bad) - def test_parse_failure_rewinds(self): + def test_parse_failure_rewinds(self, flavor_read_html): # Issue #17975 class MockFile: @@ -1444,12 +1472,12 @@ def __iter__(self) -> Iterator: good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>") bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>") - assert self.read_html(good) - assert self.read_html(bad) + assert flavor_read_html(good) + assert flavor_read_html(bad) @pytest.mark.slow @pytest.mark.single_cpu - def test_importcheck_thread_safety(self, datapath): + def test_importcheck_thread_safety(self, datapath, flavor_read_html): # see gh-16928 class ErrorThread(threading.Thread): @@ -1462,8 +1490,8 @@ def run(self): self.err = None filename = datapath("io", "data", "html", "valid_markup.html") - helper_thread1 = ErrorThread(target=self.read_html, 
args=(filename,)) - helper_thread2 = ErrorThread(target=self.read_html, args=(filename,)) + helper_thread1 = ErrorThread(target=flavor_read_html, args=(filename,)) + helper_thread2 = ErrorThread(target=flavor_read_html, args=(filename,)) helper_thread1.start() helper_thread2.start() @@ -1472,17 +1500,17 @@ def run(self): pass assert None is helper_thread1.err is helper_thread2.err - def test_parse_path_object(self, datapath): + def test_parse_path_object(self, datapath, flavor_read_html): # GH 37705 file_path_string = datapath("io", "data", "html", "spam.html") file_path = Path(file_path_string) - df1 = self.read_html(file_path_string)[0] - df2 = self.read_html(file_path)[0] + df1 = flavor_read_html(file_path_string)[0] + df2 = flavor_read_html(file_path)[0] tm.assert_frame_equal(df1, df2) - def test_parse_br_as_space(self): + def test_parse_br_as_space(self, flavor_read_html): # GH 29528: pd.read_html() convert <br> to space - result = self.read_html( + result = flavor_read_html( StringIO( """ <table> @@ -1502,7 +1530,7 @@ def test_parse_br_as_space(self): tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("arg", ["all", "body", "header", "footer"]) - def test_extract_links(self, arg): + def test_extract_links(self, arg, flavor_read_html): gh_13141_data = """ <table> <tr> @@ -1565,7 +1593,7 @@ def test_extract_links(self, arg): elif arg == "header": head_exp = gh_13141_expected["head_extract"] - result = self.read_html(StringIO(gh_13141_data), extract_links=arg)[0] + result = flavor_read_html(StringIO(gh_13141_data), extract_links=arg)[0] expected = DataFrame([data_exp, foot_exp], columns=head_exp) expected = expected.fillna(np.nan) tm.assert_frame_equal(result, expected) @@ -1578,7 +1606,7 @@ def test_extract_links_bad(self, spam_data): with pytest.raises(ValueError, match=msg): read_html(spam_data, extract_links="incorrect") - def test_extract_links_all_no_header(self): + def test_extract_links_all_no_header(self, flavor_read_html): # GH 48316 data = """ <table> @@ -1589,7 +1617,7 @@ def test_extract_links_all_no_header(self): </tr> </table> """ - result = self.read_html(StringIO(data), extract_links="all")[0] + result = flavor_read_html(StringIO(data), extract_links="all")[0] expected = DataFrame([[("Google.com", "https://google.com")]]) tm.assert_frame_equal(result, expected) @@ -1601,7 +1629,7 @@ def test_invalid_dtype_backend(self): with pytest.raises(ValueError, match=msg): read_html("test", dtype_backend="numpy") - def test_style_tag(self): + def test_style_tag(self, flavor_read_html): # GH 48316 data = """ <table> @@ -1622,6 +1650,6 @@ def test_style_tag(self): </tr> </table> """ - result = self.read_html(StringIO(data))[0] + result = flavor_read_html(StringIO(data))[0] expected = DataFrame(data=[["A1", "B1"], ["A2", "B2"]], columns=["A", "B"]) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 55fc77fb5705f..8547fd6988791 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -30,11 +30,10 @@ @pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) -def switch_numexpr_min_elements(request): - _MIN_ELEMENTS = expr._MIN_ELEMENTS - expr._MIN_ELEMENTS = request.param - yield request.param - expr._MIN_ELEMENTS = _MIN_ELEMENTS +def switch_numexpr_min_elements(request, monkeypatch): + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", request.param) + yield def _permute(obj): diff --git 
a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 1e66cefbcfdd0..dfec99f0786eb 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -102,12 +102,6 @@ def _array_mixed2(_mixed2): @pytest.mark.skipif(not expr.USE_NUMEXPR, reason="not using numexpr") class TestExpressions: - @pytest.fixture(autouse=True) - def save_min_elements(self): - min_elements = expr._MIN_ELEMENTS - yield - expr._MIN_ELEMENTS = min_elements - @staticmethod def call_op(df, other, flex: bool, opname: str): if flex: @@ -140,21 +134,24 @@ def call_op(df, other, flex: bool, opname: str): @pytest.mark.parametrize( "arith", ["add", "sub", "mul", "mod", "truediv", "floordiv"] ) - def test_run_arithmetic(self, request, fixture, flex, arith): + def test_run_arithmetic(self, request, fixture, flex, arith, monkeypatch): df = request.getfixturevalue(fixture) - expr._MIN_ELEMENTS = 0 - result, expected = self.call_op(df, df, flex, arith) - - if arith == "truediv": - assert all(x.kind == "f" for x in expected.dtypes.values) - tm.assert_equal(expected, result) + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + result, expected = self.call_op(df, df, flex, arith) - for i in range(len(df.columns)): - result, expected = self.call_op(df.iloc[:, i], df.iloc[:, i], flex, arith) if arith == "truediv": - assert expected.dtype.kind == "f" + assert all(x.kind == "f" for x in expected.dtypes.values) tm.assert_equal(expected, result) + for i in range(len(df.columns)): + result, expected = self.call_op( + df.iloc[:, i], df.iloc[:, i], flex, arith + ) + if arith == "truediv": + assert expected.dtype.kind == "f" + tm.assert_equal(expected, result) + @pytest.mark.parametrize( "fixture", [ @@ -168,7 +165,7 @@ def test_run_arithmetic(self, request, fixture, flex, arith): ], ) @pytest.mark.parametrize("flex", [True, False]) - def test_run_binary(self, request, fixture, flex, comparison_op): + def test_run_binary(self, request, fixture, flex, comparison_op, monkeypatch): """ tests solely that the result is the same whether or not numexpr is enabled. Need to test whether the function does the correct thing @@ -179,18 +176,19 @@ def test_run_binary(self, request, fixture, flex, comparison_op): with option_context("compute.use_numexpr", False): other = df.copy() + 1 - expr._MIN_ELEMENTS = 0 - expr.set_test_mode(True) + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + expr.set_test_mode(True) - result, expected = self.call_op(df, other, flex, arith) + result, expected = self.call_op(df, other, flex, arith) - used_numexpr = expr.get_test_result() - assert used_numexpr, "Did not use numexpr as expected." - tm.assert_equal(expected, result) + used_numexpr = expr.get_test_result() + assert used_numexpr, "Did not use numexpr as expected." 
+ tm.assert_equal(expected, result) - for i in range(len(df.columns)): - binary_comp = other.iloc[:, i] + 1 - self.call_op(df.iloc[:, i], binary_comp, flex, "add") + for i in range(len(df.columns)): + binary_comp = other.iloc[:, i] + 1 + self.call_op(df.iloc[:, i], binary_comp, flex, "add") def test_invalid(self): array = np.random.default_rng(2).standard_normal(1_000_001) @@ -406,7 +404,7 @@ def test_bool_ops_column_name_dtype(self, test_input, expected): "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv") ) @pytest.mark.parametrize("axis", (0, 1)) - def test_frame_series_axis(self, axis, arith, _frame): + def test_frame_series_axis(self, axis, arith, _frame, monkeypatch): # GH#26736 Dataframe.floordiv(Series, axis=1) fails df = _frame @@ -415,15 +413,16 @@ def test_frame_series_axis(self, axis, arith, _frame): else: other = df.iloc[:, 0] - expr._MIN_ELEMENTS = 0 + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) - op_func = getattr(df, arith) + op_func = getattr(df, arith) - with option_context("compute.use_numexpr", False): - expected = op_func(other, axis=axis) + with option_context("compute.use_numexpr", False): + expected = op_func(other, axis=axis) - result = op_func(other, axis=axis) - tm.assert_frame_equal(expected, result) + result = op_func(other, axis=axis) + tm.assert_frame_equal(expected, result) @pytest.mark.parametrize( "op", @@ -436,29 +435,32 @@ def test_frame_series_axis(self, axis, arith, _frame): ) @pytest.mark.parametrize("box", [DataFrame, Series, Index]) @pytest.mark.parametrize("scalar", [-5, 5]) - def test_python_semantics_with_numexpr_installed(self, op, box, scalar): + def test_python_semantics_with_numexpr_installed( + self, op, box, scalar, monkeypatch + ): # https://github.com/pandas-dev/pandas/issues/36047 - expr._MIN_ELEMENTS = 0 - data = np.arange(-50, 50) - obj = box(data) - method = getattr(obj, op) - result = method(scalar) - - # compare result with numpy - with option_context("compute.use_numexpr", False): - expected = method(scalar) - - tm.assert_equal(result, expected) - - # compare result element-wise with Python - for i, elem in enumerate(data): - if box == DataFrame: - scalar_result = result.iloc[i, 0] - else: - scalar_result = result[i] - try: - expected = getattr(int(elem), op)(scalar) - except ZeroDivisionError: - pass - else: - assert scalar_result == expected + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 0) + data = np.arange(-50, 50) + obj = box(data) + method = getattr(obj, op) + result = method(scalar) + + # compare result with numpy + with option_context("compute.use_numexpr", False): + expected = method(scalar) + + tm.assert_equal(result, expected) + + # compare result element-wise with Python + for i, elem in enumerate(data): + if box == DataFrame: + scalar_result = result.iloc[i, 0] + else: + scalar_result = result[i] + try: + expected = getattr(int(elem), op)(scalar) + except ZeroDivisionError: + pass + else: + assert scalar_result == expected
Scoping the autouse fixture functionality closer to where it matters for the tests. A condensed sketch of the resulting fixture shape is shown below.
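For reference, here is the fixture shape the diff converges on — a minimal sketch using pytest's `monkeypatch.context()`, with `expr` standing for `pandas.core.computation.expressions` as in the test suite:

```python
import pytest

import pandas.core.computation.expressions as expr


@pytest.fixture(autouse=True, params=[0, 1_000_000], ids=["numexpr", "python"])
def switch_numexpr_min_elements(request, monkeypatch):
    # The context manager restores the original expr._MIN_ELEMENTS even if
    # the test body raises, replacing the manual save/yield/restore pattern.
    with monkeypatch.context() as m:
        m.setattr(expr, "_MIN_ELEMENTS", request.param)
        yield
```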
https://api.github.com/repos/pandas-dev/pandas/pulls/55269
2023-09-24T20:43:33Z
2023-09-25T18:22:34Z
2023-09-25T18:22:34Z
2023-09-25T18:22:37Z
BUG: groupby.idxmax/idxmin consistently raise on unobserved categorical
diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml index 3bd68c07dcbc3..4260c0836bbea 100644 --- a/.github/workflows/code-checks.yml +++ b/.github/workflows/code-checks.yml @@ -124,7 +124,7 @@ jobs: run: | cd asv_bench asv machine --yes - asv run --quick --dry-run --durations=30 --python=same + asv run --quick --dry-run --durations=30 --python=same --show-stderr build_docker_dev_environment: name: Build Docker Dev Environment diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 017a28ffb573a..50d89abbb286a 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -364,6 +364,7 @@ Plotting Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ +- Bug in :meth:`DataFrameGroupBy.idxmax`, :meth:`DataFrameGroupBy.idxmin`, :meth:`SeriesGroupBy.idxmax`, and :meth:`SeriesGroupBy.idxmin` would not consistently raise when grouping with ``observed=False`` and unobserved categoricals (:issue:`10694`) - Fixed bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`) - Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`) - Fixed bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.MonthBegin` (:issue:`55271`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e131d689b6a40..210ce8ce9fcbf 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -11910,7 +11910,7 @@ def _logical_func( def any( self, - axis: Axis = 0, + axis: Axis | None = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 904ab9bdfc6dd..a2f556eba08a4 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1185,15 +1185,13 @@ def nsmallest( def idxmin( self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True ) -> Series: - result = self._op_via_apply("idxmin", axis=axis, skipna=skipna) - return result.astype(self.obj.index.dtype) if result.empty else result + return self._idxmax_idxmin("idxmin", axis=axis, skipna=skipna) @doc(Series.idxmax.__doc__) def idxmax( self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True ) -> Series: - result = self._op_via_apply("idxmax", axis=axis, skipna=skipna) - return result.astype(self.obj.index.dtype) if result.empty else result + return self._idxmax_idxmin("idxmax", axis=axis, skipna=skipna) @doc(Series.corr.__doc__) def corr( @@ -2187,22 +2185,9 @@ def idxmax( Beef co2_emissions dtype: object """ - if axis is not lib.no_default: - if axis is None: - axis = self.axis - axis = self.obj._get_axis_number(axis) - self._deprecate_axis(axis, "idxmax") - else: - axis = self.axis - - def func(df): - return df.idxmax(axis=axis, skipna=skipna, numeric_only=numeric_only) - - func.__name__ = "idxmax" - result = self._python_apply_general( - func, self._obj_with_exclusions, not_indexed_same=True + return self._idxmax_idxmin( + "idxmax", axis=axis, numeric_only=numeric_only, skipna=skipna ) - return result.astype(self.obj.index.dtype) if result.empty else result def idxmin( self, @@ -2282,22 +2267,9 @@ def idxmin( Beef consumption dtype: object """ - if axis is not lib.no_default: - if axis is None: - axis = self.axis - axis = self.obj._get_axis_number(axis) - self._deprecate_axis(axis, "idxmin") - else: - axis = self.axis - - def 
func(df): - return df.idxmin(axis=axis, skipna=skipna, numeric_only=numeric_only) - - func.__name__ = "idxmin" - result = self._python_apply_general( - func, self._obj_with_exclusions, not_indexed_same=True + return self._idxmax_idxmin( + "idxmin", axis=axis, numeric_only=numeric_only, skipna=skipna ) - return result.astype(self.obj.index.dtype) if result.empty else result boxplot = boxplot_frame_groupby diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index a022bfd1bd9bc..e33c4b3579c69 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2015,10 +2015,14 @@ def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): with com.temp_setattr(self, "as_index", True): # GH#49834 - result needs groups in the index for # _wrap_transform_fast_result - if engine is not None: - kwargs["engine"] = engine - kwargs["engine_kwargs"] = engine_kwargs - result = getattr(self, func)(*args, **kwargs) + if func in ["idxmin", "idxmax"]: + func = cast(Literal["idxmin", "idxmax"], func) + result = self._idxmax_idxmin(func, True, *args, **kwargs) + else: + if engine is not None: + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + result = getattr(self, func)(*args, **kwargs) return self._wrap_transform_fast_result(result) @@ -5720,6 +5724,113 @@ def sample( sampled_indices = np.concatenate(sampled_indices) return self._selected_obj.take(sampled_indices, axis=self.axis) + def _idxmax_idxmin( + self, + how: Literal["idxmax", "idxmin"], + ignore_unobserved: bool = False, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + ): + """Compute idxmax/idxmin. + + Parameters + ---------- + how: {"idxmin", "idxmax"} + Whether to compute idxmin or idxmax. + axis : {{0 or 'index', 1 or 'columns'}}, default None + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + If axis is not provided, grouper's axis is used. + numeric_only : bool, default False + Include only float, int, boolean columns. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + ignore_unobserved : bool, default False + When True and an unobserved group is encountered, do not raise. This used + for transform where unobserved groups do not play an impact on the result. + + Returns + ------- + Series or DataFrame + idxmax or idxmin for the groupby operation. 
+ """ + if axis is not lib.no_default: + if axis is None: + axis = self.axis + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, how) + else: + axis = self.axis + + if not self.observed and any( + ping._passed_categorical for ping in self.grouper.groupings + ): + expected_len = np.prod( + [len(ping.group_index) for ping in self.grouper.groupings] + ) + if len(self.grouper.groupings) == 1: + result_len = len(self.grouper.groupings[0].grouping_vector.unique()) + else: + # result_index only contains observed groups in this case + result_len = len(self.grouper.result_index) + assert result_len <= expected_len + has_unobserved = result_len < expected_len + + raise_err: bool | np.bool_ = not ignore_unobserved and has_unobserved + # Only raise an error if there are columns to compute; otherwise we return + # an empty DataFrame with an index (possibly including unobserved) but no + # columns + data = self._obj_with_exclusions + if raise_err and isinstance(data, DataFrame): + if numeric_only: + data = data._get_numeric_data() + raise_err = len(data.columns) > 0 + else: + raise_err = False + if raise_err: + raise ValueError( + f"Can't get {how} of an empty group due to unobserved categories. " + "Specify observed=True in groupby instead." + ) + + try: + if self.obj.ndim == 1: + result = self._op_via_apply(how, skipna=skipna) + else: + + def func(df): + method = getattr(df, how) + return method(axis=axis, skipna=skipna, numeric_only=numeric_only) + + func.__name__ = how + result = self._python_apply_general( + func, self._obj_with_exclusions, not_indexed_same=True + ) + except ValueError as err: + name = "argmax" if how == "idxmax" else "argmin" + if f"attempt to get {name} of an empty sequence" in str(err): + raise ValueError( + f"Can't get {how} of an empty group due to unobserved categories. " + "Specify observed=True in groupby instead." + ) from None + raise + + result = result.astype(self.obj.index.dtype) if result.empty else result + + if not skipna: + has_na_value = result.isnull().any(axis=None) + if has_na_value: + warnings.warn( + f"The behavior of {type(self).__name__}.{how} with all-NA " + "values, or any-NA and skipna=False, is deprecated. 
In a future " + "version this will raise ValueError", + FutureWarning, + stacklevel=find_stack_level(), + ) + + return result + @doc(GroupBy) def get_groupby( diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index b11240c841420..11291bb89b604 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -1416,6 +1416,15 @@ def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed): return agg = getattr(series_groupby, reduction_func) + + if not observed and reduction_func in ["idxmin", "idxmax"]: + # idxmin and idxmax are designed to fail on empty inputs + with pytest.raises( + ValueError, match="empty group due to unobserved categories" + ): + agg(*args) + return + result = agg(*args) assert len(result) == expected_length @@ -1448,6 +1457,15 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"] agg = getattr(series_groupby, reduction_func) + + if reduction_func in ["idxmin", "idxmax"]: + # idxmin and idxmax are designed to fail on empty inputs + with pytest.raises( + ValueError, match="empty group due to unobserved categories" + ): + agg(*args) + return + result = agg(*args) zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func] @@ -1514,6 +1532,15 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_false( df_grp = df.groupby(["cat_1", "cat_2"], observed=observed) args = get_groupby_method_args(reduction_func, df) + + if not observed and reduction_func in ["idxmin", "idxmax"]: + # idxmin and idxmax are designed to fail on empty inputs + with pytest.raises( + ValueError, match="empty group due to unobserved categories" + ): + getattr(df_grp, reduction_func)(*args) + return + res = getattr(df_grp, reduction_func)(*args) expected = _results_for_groupbys_with_missing_categories[reduction_func] @@ -1883,14 +1910,7 @@ def test_category_order_reducer( request, as_index, sort, observed, reduction_func, index_kind, ordered ): # GH#48749 - if ( - reduction_func in ("idxmax", "idxmin") - and not observed - and index_kind != "multi" - ): - msg = "GH#10694 - idxmax/min fail with unused categories" - request.node.add_marker(pytest.mark.xfail(reason=msg)) - elif reduction_func == "corrwith" and not as_index: + if reduction_func == "corrwith" and not as_index: msg = "GH#49950 - corrwith with as_index=False may not have grouping column" request.node.add_marker(pytest.mark.xfail(reason=msg)) elif index_kind != "range" and not as_index: @@ -1912,6 +1932,15 @@ def test_category_order_reducer( df = df.set_index(keys) args = get_groupby_method_args(reduction_func, df) gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed) + + if not observed and reduction_func in ["idxmin", "idxmax"]: + # idxmin and idxmax are designed to fail on empty inputs + with pytest.raises( + ValueError, match="empty group due to unobserved categories" + ): + getattr(gb, reduction_func)(*args) + return + op_result = getattr(gb, reduction_func)(*args) if as_index: result = op_result.index.get_level_values("a").categories @@ -2114,6 +2143,13 @@ def test_agg_list(request, as_index, observed, reduction_func, test_series, keys gb = gb["b"] args = get_groupby_method_args(reduction_func, df) + if not observed and reduction_func in ["idxmin", "idxmax"] and keys == ["a1", "a2"]: + with pytest.raises( + ValueError, match="empty group due to unobserved categories" + ): + gb.agg([reduction_func], 
*args) + return + result = gb.agg([reduction_func], *args) expected = getattr(gb, reduction_func)(*args) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index a92880c87b847..08372541988d0 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -544,6 +544,39 @@ def test_idxmin_idxmax_axis1(): gb2.idxmax(axis=1) +@pytest.mark.parametrize( + "func, values, expected_values, warn", + [ + ("idxmin", [0, 1, 2], [0, 2], None), + ("idxmax", [0, 1, 2], [1, 2], None), + ("idxmin", [0, np.nan, 2], [np.nan, 2], FutureWarning), + ("idxmax", [0, np.nan, 2], [np.nan, 2], FutureWarning), + ("idxmin", [1, 0, np.nan], [1, np.nan], FutureWarning), + ("idxmax", [1, 0, np.nan], [0, np.nan], FutureWarning), + ], +) +@pytest.mark.parametrize("test_series", [True, False]) +def test_idxmin_idxmax_skipna_false(func, values, expected_values, warn, test_series): + # GH#54234 + df = DataFrame( + { + "a": [1, 1, 2], + "b": values, + } + ) + gb = df.groupby("a") + index = Index([1, 2], name="a") + expected = DataFrame({"b": expected_values}, index=index) + if test_series: + gb = gb["b"] + expected = expected["b"] + klass = "Series" if test_series else "DataFrame" + msg = f"The behavior of {klass}GroupBy.{func} with all-NA values" + with tm.assert_produces_warning(warn, match=msg): + result = getattr(gb, func)(skipna=False) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("numeric_only", [True, False, None]) def test_axis1_numeric_only(request, groupby_func, numeric_only): if groupby_func in ("idxmax", "idxmin"): diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 4ca8b0e317bd2..e3e4a7efaa307 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2001,22 +2001,10 @@ def test_pivot_table_values_key_error(): @pytest.mark.parametrize( "op", ["idxmax", "idxmin", "min", "max", "sum", "prod", "skew"] ) -def test_empty_groupby( - columns, keys, values, method, op, request, using_array_manager, dropna -): +def test_empty_groupby(columns, keys, values, method, op, using_array_manager, dropna): # GH8093 & GH26411 override_dtype = None - if ( - isinstance(values, Categorical) - and len(keys) == 1 - and op in ["idxmax", "idxmin"] - ): - mark = pytest.mark.xfail( - raises=ValueError, match="attempt to get arg(min|max) of an empty sequence" - ) - request.node.add_marker(mark) - if isinstance(values, BooleanArray) and op in ["sum", "prod"]: # We expect to get Int64 back for these override_dtype = "Int64" @@ -2061,12 +2049,21 @@ def get_categorical_invalid_expected(): is_dt64 = df.dtypes.iloc[0].kind == "M" is_cat = isinstance(values, Categorical) - if isinstance(values, Categorical) and not values.ordered and op in ["min", "max"]: - msg = f"Cannot perform {op} with non-ordered Categorical" - with pytest.raises(TypeError, match=msg): + if ( + isinstance(values, Categorical) + and not values.ordered + and op in ["min", "max", "idxmin", "idxmax"] + ): + if op in ["min", "max"]: + msg = f"Cannot perform {op} with non-ordered Categorical" + klass = TypeError + else: + msg = f"Can't get {op} of an empty group due to unobserved categories" + klass = ValueError + with pytest.raises(klass, match=msg): get_result() - if isinstance(columns, list): + if op in ["min", "max"] and isinstance(columns, list): # i.e. 
DataframeGroupBy, not SeriesGroupBy result = get_result(numeric_only=True) expected = get_categorical_invalid_expected() diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py index d82278c277d48..8065aa63dff81 100644 --- a/pandas/tests/groupby/test_groupby_dropna.py +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -503,18 +503,7 @@ def test_null_is_null_for_dtype( @pytest.mark.parametrize("index_kind", ["range", "single", "multi"]) -def test_categorical_reducers( - request, reduction_func, observed, sort, as_index, index_kind -): - # GH#36327 - if ( - reduction_func in ("idxmin", "idxmax") - and not observed - and index_kind != "multi" - ): - msg = "GH#10694 - idxmin/max broken for categorical with observed=False" - request.node.add_marker(pytest.mark.xfail(reason=msg)) - +def test_categorical_reducers(reduction_func, observed, sort, as_index, index_kind): # Ensure there is at least one null value by appending to the end values = np.append(np.random.default_rng(2).choice([1, 2, None], size=19), None) df = pd.DataFrame( @@ -544,6 +533,17 @@ def test_categorical_reducers( args = (args[0].drop(columns=keys),) args_filled = (args_filled[0].drop(columns=keys),) + gb_keepna = df.groupby( + keys, dropna=False, observed=observed, sort=sort, as_index=as_index + ) + + if not observed and reduction_func in ["idxmin", "idxmax"]: + with pytest.raises( + ValueError, match="empty group due to unobserved categories" + ): + getattr(gb_keepna, reduction_func)(*args) + return + gb_filled = df_filled.groupby(keys, observed=observed, sort=sort, as_index=True) expected = getattr(gb_filled, reduction_func)(*args_filled).reset_index() expected["x"] = expected["x"].replace(4, None) @@ -573,9 +573,6 @@ def test_categorical_reducers( if as_index: expected = expected["size"].rename(None) - gb_keepna = df.groupby( - keys, dropna=False, observed=observed, sort=sort, as_index=as_index - ) if as_index or index_kind == "range" or reduction_func == "size": warn = None else: diff --git a/pandas/tests/groupby/test_raises.py b/pandas/tests/groupby/test_raises.py index f9a2b3d44b117..46bf324fad1d7 100644 --- a/pandas/tests/groupby/test_raises.py +++ b/pandas/tests/groupby/test_raises.py @@ -97,22 +97,24 @@ def df_with_cat_col(): return df -def _call_and_check(klass, msg, how, gb, groupby_func, args): - if klass is None: - if how == "method": - getattr(gb, groupby_func)(*args) - elif how == "agg": - gb.agg(groupby_func, *args) - else: - gb.transform(groupby_func, *args) - else: - with pytest.raises(klass, match=msg): +def _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg=""): + warn_klass = None if warn_msg == "" else FutureWarning + with tm.assert_produces_warning(warn_klass, match=warn_msg): + if klass is None: if how == "method": getattr(gb, groupby_func)(*args) elif how == "agg": gb.agg(groupby_func, *args) else: gb.transform(groupby_func, *args) + else: + with pytest.raises(klass, match=msg): + if how == "method": + getattr(gb, groupby_func)(*args) + elif how == "agg": + gb.agg(groupby_func, *args) + else: + gb.transform(groupby_func, *args) @pytest.mark.parametrize("how", ["method", "agg", "transform"]) @@ -233,8 +235,7 @@ def test_groupby_raises_string_np( warn_msg = "using SeriesGroupBy.[sum|mean]" else: warn_msg = "using DataFrameGroupBy.[sum|mean]" - with tm.assert_produces_warning(FutureWarning, match=warn_msg): - _call_and_check(klass, msg, how, gb, groupby_func_np, ()) + _call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg) 
@pytest.mark.parametrize("how", ["method", "agg", "transform"]) @@ -297,13 +298,11 @@ def test_groupby_raises_datetime( "var": (TypeError, "datetime64 type does not support var operations"), }[groupby_func] - warn = None - warn_msg = f"'{groupby_func}' with datetime64 dtypes is deprecated" if groupby_func in ["any", "all"]: - warn = FutureWarning - - with tm.assert_produces_warning(warn, match=warn_msg): - _call_and_check(klass, msg, how, gb, groupby_func, args) + warn_msg = f"'{groupby_func}' with datetime64 dtypes is deprecated" + else: + warn_msg = "" + _call_and_check(klass, msg, how, gb, groupby_func, args, warn_msg=warn_msg) @pytest.mark.parametrize("how", ["agg", "transform"]) @@ -342,8 +341,7 @@ def test_groupby_raises_datetime_np( warn_msg = "using SeriesGroupBy.[sum|mean]" else: warn_msg = "using DataFrameGroupBy.[sum|mean]" - with tm.assert_produces_warning(FutureWarning, match=warn_msg): - _call_and_check(klass, msg, how, gb, groupby_func_np, ()) + _call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg) @pytest.mark.parametrize("func", ["prod", "cumprod", "skew", "var"]) @@ -540,8 +538,7 @@ def test_groupby_raises_category_np( warn_msg = "using SeriesGroupBy.[sum|mean]" else: warn_msg = "using DataFrameGroupBy.[sum|mean]" - with tm.assert_produces_warning(FutureWarning, match=warn_msg): - _call_and_check(klass, msg, how, gb, groupby_func_np, ()) + _call_and_check(klass, msg, how, gb, groupby_func_np, (), warn_msg=warn_msg) @pytest.mark.parametrize("how", ["method", "agg", "transform"]) @@ -572,6 +569,16 @@ def test_groupby_raises_category_on_category( return empty_groups = any(group.empty for group in gb.groups.values()) + if ( + not observed + and how != "transform" + and isinstance(by, list) + and isinstance(by[0], str) + and by == ["a", "b"] + ): + assert not empty_groups + # TODO: empty_groups should be true due to unobserved categorical combinations + empty_groups = True klass, msg = { "all": (None, ""), @@ -617,10 +624,10 @@ def test_groupby_raises_category_on_category( if not using_copy_on_write else (None, ""), # no-op with CoW "first": (None, ""), - "idxmax": (ValueError, "attempt to get argmax of an empty sequence") + "idxmax": (ValueError, "empty group due to unobserved categories") if empty_groups else (None, ""), - "idxmin": (ValueError, "attempt to get argmin of an empty sequence") + "idxmin": (ValueError, "empty group due to unobserved categories") if empty_groups else (None, ""), "last": (None, ""), diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index cf3f41e04902c..4a493ef3fd52c 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -1637,3 +1637,19 @@ def test_as_index_no_change(keys, df, groupby_func): result = gb_as_index_true.transform(groupby_func, *args) expected = gb_as_index_false.transform(groupby_func, *args) tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("how", ["idxmax", "idxmin"]) +@pytest.mark.parametrize("numeric_only", [True, False]) +def test_idxmin_idxmax_transform_args(how, skipna, numeric_only): + # GH#55268 - ensure *args are passed through when calling transform + df = DataFrame({"a": [1, 1, 1, 2], "b": [3.0, 4.0, np.nan, 6.0], "c": list("abcd")}) + gb = df.groupby("a") + msg = f"'axis' keyword in DataFrameGroupBy.{how} is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.transform(how, 0, skipna, numeric_only) + warn = None if skipna 
else FutureWarning + msg = f"The behavior of DataFrame.{how} with .* any-NA and skipna=False" + with tm.assert_produces_warning(warn, match=msg): + expected = gb.transform(how, skipna=skipna, numeric_only=numeric_only) + tm.assert_frame_equal(result, expected)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

Part of #10694, but doesn't close it fully:

- When given a non-ordered categorical with unobserved categories, we currently raise about the unobserved categories. We should instead raise about the non-orderedness, for consistency with min/max.
- _python_apply_general does not return the correct dtype when there is a CategoricalIndex with NaN values.
- _python_apply_general fails on a single grouping with unobserved categories even when we call it from transform, where the unobserved categories should have no impact.
- _python_apply_general with an empty DataFrame with no numeric columns returns all the columns even when `numeric_only=True`.

All of these are fixed in #54234. A minimal sketch of the new raising behavior follows.
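A minimal sketch of the behavior the new tests assert (the frame and column names here are illustrative, and this requires a pandas build containing this change):

```python
import pandas as pd

df = pd.DataFrame(
    {
        "key": pd.Categorical(["a", "a"], categories=["a", "b"]),  # "b" is unobserved
        "val": [1, 2],
    }
)

# With observed=False the unobserved category "b" yields an empty group, and
# idxmax/idxmin now raise consistently instead of failing in assorted ways:
gb = df.groupby("key", observed=False)
try:
    gb.idxmax()
except ValueError as err:
    # "Can't get idxmax of an empty group due to unobserved categories.
    #  Specify observed=True in groupby instead."
    print(err)
```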
https://api.github.com/repos/pandas-dev/pandas/pulls/55268
2023-09-24T20:10:46Z
2023-10-08T18:49:58Z
2023-10-08T18:49:58Z
2023-10-09T20:25:11Z
TST: Don't call gc.collect for plotting tests
diff --git a/pandas/tests/io/formats/style/test_matplotlib.py b/pandas/tests/io/formats/style/test_matplotlib.py index fb7a77f1ddb27..058b225075e36 100644 --- a/pandas/tests/io/formats/style/test_matplotlib.py +++ b/pandas/tests/io/formats/style/test_matplotlib.py @@ -1,5 +1,3 @@ -import gc - import numpy as np import pytest @@ -33,8 +31,6 @@ def mpl_cleanup(): mpl_units.registry.clear() mpl_units.registry.update(orig_units_registry) plt.close("all") - # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close # noqa: E501 - gc.collect(1) @pytest.fixture diff --git a/pandas/tests/plotting/conftest.py b/pandas/tests/plotting/conftest.py index d688bbd47595c..fb9fbfe674efe 100644 --- a/pandas/tests/plotting/conftest.py +++ b/pandas/tests/plotting/conftest.py @@ -1,5 +1,3 @@ -import gc - import numpy as np import pytest @@ -25,8 +23,6 @@ def mpl_cleanup(): mpl_units.registry.clear() mpl_units.registry.update(orig_units_registry) plt.close("all") - # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close # noqa: E501 - gc.collect(1) @pytest.fixture diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index 47114cab47619..606e22994e678 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -3,7 +3,6 @@ date, datetime, ) -import gc import itertools import re import string @@ -2050,8 +2049,6 @@ def test_memory_leak(self, kind): # have matplotlib delete all the figures plt.close("all") - # force a garbage collection - gc.collect() assert ref() is None def test_df_gridspec_patterns_vert_horiz(self):
This may be slowing down test runs, as Python may already be garbage collecting dead objects on its own. See the sketch below for the reasoning.
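The rationale leans on CPython's reference counting: once the last strong reference to an object goes away, the object is freed immediately, without a collector pass, unless it participates in a reference cycle. A minimal sketch (the `Figure` class here is an illustrative stand-in, not matplotlib's):

```python
import weakref


class Figure:
    """Illustrative stand-in for an object whose cleanup we want to observe."""


fig = Figure()
ref = weakref.ref(fig)
del fig  # reference count hits zero; CPython frees the object right away
assert ref() is None  # holds without an explicit gc.collect()
```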
https://api.github.com/repos/pandas-dev/pandas/pulls/55267
2023-09-24T18:20:29Z
2023-09-25T01:58:38Z
null
2023-09-25T01:58:41Z
TST: Load iris and types data in sql tests as needed
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 63546b44e92be..c30965feeb586 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -24,7 +24,6 @@ import pandas as pd from pandas import ( DataFrame, - DatetimeTZDtype, Index, MultiIndex, Series, @@ -86,17 +85,18 @@ def sql_strings(): } -def iris_table_metadata(dialect: str): +def iris_table_metadata(): + import sqlalchemy from sqlalchemy import ( - REAL, Column, + Double, Float, MetaData, String, Table, ) - dtype = Float if dialect == "postgresql" else REAL + dtype = Double if Version(sqlalchemy.__version__) >= Version("2.0.0") else Float metadata = MetaData() iris = Table( "iris", @@ -127,11 +127,11 @@ def create_and_load_iris_sqlite3(conn: sqlite3.Connection, iris_file: Path): cur.executemany(stmt, reader) -def create_and_load_iris(conn, iris_file: Path, dialect: str): +def create_and_load_iris(conn, iris_file: Path): from sqlalchemy import insert from sqlalchemy.engine import Engine - iris = iris_table_metadata(dialect) + iris = iris_table_metadata() with iris_file.open(newline=None, encoding="utf-8") as csvfile: reader = csv.reader(csvfile) @@ -198,8 +198,6 @@ def types_table_metadata(dialect: str): Column("IntColWithNull", Integer), Column("BoolColWithNull", bool_type), ) - if dialect == "postgresql": - types.append_column(Column("DateColWithTz", DateTime(timezone=True))) return types @@ -245,6 +243,51 @@ def create_and_load_types(conn, types_data: list[dict], dialect: str): conn.execute(stmt) +def create_and_load_postgres_datetz(conn): + from sqlalchemy import ( + Column, + DateTime, + MetaData, + Table, + insert, + ) + from sqlalchemy.engine import Engine + + metadata = MetaData() + datetz = Table("datetz", metadata, Column("DateColWithTz", DateTime(timezone=True))) + datetz_data = [ + { + "DateColWithTz": "2000-01-01 00:00:00-08:00", + }, + { + "DateColWithTz": "2000-06-01 00:00:00-07:00", + }, + ] + stmt = insert(datetz).values(datetz_data) + if isinstance(conn, Engine): + with conn.connect() as conn: + with conn.begin(): + datetz.drop(conn, checkfirst=True) + datetz.create(bind=conn) + conn.execute(stmt) + else: + with conn.begin(): + datetz.drop(conn, checkfirst=True) + datetz.create(bind=conn) + conn.execute(stmt) + + # "2000-01-01 00:00:00-08:00" should convert to + # "2000-01-01 08:00:00" + # "2000-06-01 00:00:00-07:00" should convert to + # "2000-06-01 07:00:00" + # GH 6415 + expected_data = [ + Timestamp("2000-01-01 08:00:00", tz="UTC"), + Timestamp("2000-06-01 07:00:00", tz="UTC"), + ] + return Series(expected_data, name="DateColWithTz") + + def check_iris_frame(frame: DataFrame): pytype = frame.dtypes.iloc[0].type row = frame.iloc[0] @@ -295,7 +338,6 @@ def types_data(): "BoolCol": False, "IntColWithNull": 1, "BoolColWithNull": False, - "DateColWithTz": "2000-01-01 00:00:00-08:00", }, { "TextCol": "first", @@ -307,7 +349,6 @@ def types_data(): "BoolCol": False, "IntColWithNull": None, "BoolColWithNull": None, - "DateColWithTz": "2000-06-01 00:00:00-07:00", }, ] @@ -429,7 +470,7 @@ def drop_view( @pytest.fixture -def mysql_pymysql_engine(iris_path, types_data): +def mysql_pymysql_engine(): sqlalchemy = pytest.importorskip("sqlalchemy") pymysql = pytest.importorskip("pymysql") engine = sqlalchemy.create_engine( @@ -437,15 +478,6 @@ def mysql_pymysql_engine(iris_path, types_data): connect_args={"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}, poolclass=sqlalchemy.pool.NullPool, ) - insp = sqlalchemy.inspect(engine) - if not insp.has_table("iris"): - 
create_and_load_iris(engine, iris_path, "mysql") - if not insp.has_table("types"): - for entry in types_data: - entry.pop("DateColWithTz") - create_and_load_types(engine, types_data, "mysql") - if not insp.has_table("iris_view"): - create_and_load_iris_view(engine) yield engine for view in get_all_views(engine): drop_view(view, engine) @@ -455,26 +487,44 @@ def mysql_pymysql_engine(iris_path, types_data): @pytest.fixture -def mysql_pymysql_conn(iris_path, mysql_pymysql_engine): +def mysql_pymysql_engine_iris(mysql_pymysql_engine, iris_path): + create_and_load_iris(mysql_pymysql_engine, iris_path) + create_and_load_iris_view(mysql_pymysql_engine) + yield mysql_pymysql_engine + + +@pytest.fixture +def mysql_pymysql_engine_types(mysql_pymysql_engine, types_data): + create_and_load_types(mysql_pymysql_engine, types_data, "mysql") + yield mysql_pymysql_engine + + +@pytest.fixture +def mysql_pymysql_conn(mysql_pymysql_engine): with mysql_pymysql_engine.connect() as conn: yield conn @pytest.fixture -def postgresql_psycopg2_engine(iris_path, types_data): +def mysql_pymysql_conn_iris(mysql_pymysql_engine_iris): + with mysql_pymysql_engine_iris.connect() as conn: + yield conn + + +@pytest.fixture +def mysql_pymysql_conn_types(mysql_pymysql_engine_types): + with mysql_pymysql_engine_types.connect() as conn: + yield conn + + +@pytest.fixture +def postgresql_psycopg2_engine(): sqlalchemy = pytest.importorskip("sqlalchemy") pytest.importorskip("psycopg2") engine = sqlalchemy.create_engine( "postgresql+psycopg2://postgres:postgres@localhost:5432/pandas", poolclass=sqlalchemy.pool.NullPool, ) - insp = sqlalchemy.inspect(engine) - if not insp.has_table("iris"): - create_and_load_iris(engine, iris_path, "postgresql") - if not insp.has_table("types"): - create_and_load_types(engine, types_data, "postgresql") - if not insp.has_table("iris_view"): - create_and_load_iris_view(engine) yield engine for view in get_all_views(engine): drop_view(view, engine) @@ -483,34 +533,48 @@ def postgresql_psycopg2_engine(iris_path, types_data): engine.dispose() +@pytest.fixture +def postgresql_psycopg2_engine_iris(postgresql_psycopg2_engine, iris_path): + create_and_load_iris(postgresql_psycopg2_engine, iris_path) + create_and_load_iris_view(postgresql_psycopg2_engine) + yield postgresql_psycopg2_engine + + +@pytest.fixture +def postgresql_psycopg2_engine_types(postgresql_psycopg2_engine, types_data): + create_and_load_types(postgresql_psycopg2_engine, types_data, "postgres") + yield postgresql_psycopg2_engine + + @pytest.fixture def postgresql_psycopg2_conn(postgresql_psycopg2_engine): with postgresql_psycopg2_engine.connect() as conn: yield conn +@pytest.fixture +def postgresql_psycopg2_conn_iris(postgresql_psycopg2_engine_iris): + with postgresql_psycopg2_engine_iris.connect() as conn: + yield conn + + +@pytest.fixture +def postgresql_psycopg2_conn_types(postgresql_psycopg2_engine_types): + with postgresql_psycopg2_engine_types.connect() as conn: + yield conn + + @pytest.fixture def sqlite_str(): pytest.importorskip("sqlalchemy") with tm.ensure_clean() as name: - yield "sqlite:///" + name + yield f"sqlite:///{name}" @pytest.fixture -def sqlite_engine(sqlite_str, iris_path, types_data): +def sqlite_engine(sqlite_str): sqlalchemy = pytest.importorskip("sqlalchemy") engine = sqlalchemy.create_engine(sqlite_str, poolclass=sqlalchemy.pool.NullPool) - - insp = sqlalchemy.inspect(engine) - if not insp.has_table("iris"): - create_and_load_iris(engine, iris_path, "sqlite") - if not insp.has_table("iris_view"): - 
create_and_load_iris_view(engine) - if not insp.has_table("types"): - for entry in types_data: - entry.pop("DateColWithTz") - create_and_load_types(engine, types_data, "sqlite") - yield engine for view in get_all_views(engine): drop_view(view, engine) @@ -526,74 +590,68 @@ def sqlite_conn(sqlite_engine): @pytest.fixture -def sqlite_iris_str(sqlite_str, iris_path, types_data): +def sqlite_str_iris(sqlite_str, iris_path): sqlalchemy = pytest.importorskip("sqlalchemy") engine = sqlalchemy.create_engine(sqlite_str) + create_and_load_iris(engine, iris_path) + create_and_load_iris_view(engine) + engine.dispose() + return sqlite_str + - insp = sqlalchemy.inspect(engine) - if not insp.has_table("iris"): - create_and_load_iris(engine, iris_path, "sqlite") - if not insp.has_table("iris_view"): - create_and_load_iris_view(engine) - if not insp.has_table("types"): - for entry in types_data: - entry.pop("DateColWithTz") - create_and_load_types(engine, types_data, "sqlite") +@pytest.fixture +def sqlite_engine_iris(sqlite_engine, iris_path): + create_and_load_iris(sqlite_engine, iris_path) + create_and_load_iris_view(sqlite_engine) + yield sqlite_engine + + +@pytest.fixture +def sqlite_conn_iris(sqlite_engine_iris): + with sqlite_engine_iris.connect() as conn: + yield conn + + +@pytest.fixture +def sqlite_str_types(sqlite_str, types_data): + sqlalchemy = pytest.importorskip("sqlalchemy") + engine = sqlalchemy.create_engine(sqlite_str) + create_and_load_types(engine, types_data, "sqlite") engine.dispose() return sqlite_str @pytest.fixture -def sqlite_iris_engine(sqlite_engine, iris_path): - return sqlite_engine +def sqlite_engine_types(sqlite_engine, types_data): + create_and_load_types(sqlite_engine, types_data, "sqlite") + yield sqlite_engine @pytest.fixture -def sqlite_iris_conn(sqlite_iris_engine): - with sqlite_iris_engine.connect() as conn: +def sqlite_conn_types(sqlite_engine_types): + with sqlite_engine_types.connect() as conn: yield conn @pytest.fixture def sqlite_buildin(): with contextlib.closing(sqlite3.connect(":memory:")) as closing_conn: - create_and_load_iris_view(closing_conn) with closing_conn as conn: yield conn @pytest.fixture -def sqlite_sqlalchemy_memory_engine(iris_path, types_data): - sqlalchemy = pytest.importorskip("sqlalchemy") - engine = sqlalchemy.create_engine("sqlite:///:memory:") - - insp = sqlalchemy.inspect(engine) - if not insp.has_table("iris"): - create_and_load_iris(engine, iris_path, "sqlite") - if not insp.has_table("iris_view"): - create_and_load_iris_view(engine) - if not insp.has_table("types"): - for entry in types_data: - entry.pop("DateColWithTz") - create_and_load_types(engine, types_data, "sqlite") - - yield engine - for view in get_all_views(engine): - drop_view(view, engine) - for tbl in get_all_tables(engine): - drop_table(tbl, engine) +def sqlite_buildin_iris(sqlite_buildin, iris_path): + create_and_load_iris_sqlite3(sqlite_buildin, iris_path) + create_and_load_iris_view(sqlite_buildin) + yield sqlite_buildin @pytest.fixture -def sqlite_buildin_iris(sqlite_buildin, iris_path, types_data): - create_and_load_iris_sqlite3(sqlite_buildin, iris_path) - - for entry in types_data: - entry.pop("DateColWithTz") +def sqlite_buildin_types(sqlite_buildin, types_data): types_data = [tuple(entry.values()) for entry in types_data] - create_and_load_types_sqlite3(sqlite_buildin, types_data) - return sqlite_buildin + yield sqlite_buildin mysql_connectable = [ @@ -601,39 +659,64 @@ def sqlite_buildin_iris(sqlite_buildin, iris_path, types_data): 
pytest.param("mysql_pymysql_conn", marks=pytest.mark.db), ] +mysql_connectable_iris = [ + pytest.param("mysql_pymysql_engine_iris", marks=pytest.mark.db), + pytest.param("mysql_pymysql_conn_iris", marks=pytest.mark.db), +] + +mysql_connectable_types = [ + pytest.param("mysql_pymysql_engine_types", marks=pytest.mark.db), + pytest.param("mysql_pymysql_conn_types", marks=pytest.mark.db), +] postgresql_connectable = [ pytest.param("postgresql_psycopg2_engine", marks=pytest.mark.db), pytest.param("postgresql_psycopg2_conn", marks=pytest.mark.db), ] +postgresql_connectable_iris = [ + pytest.param("postgresql_psycopg2_engine_iris", marks=pytest.mark.db), + pytest.param("postgresql_psycopg2_conn_iris", marks=pytest.mark.db), +] + +postgresql_connectable_types = [ + pytest.param("postgresql_psycopg2_engine_types", marks=pytest.mark.db), + pytest.param("postgresql_psycopg2_conn_types", marks=pytest.mark.db), +] + sqlite_connectable = [ - pytest.param("sqlite_engine", marks=pytest.mark.db), - pytest.param("sqlite_conn", marks=pytest.mark.db), - pytest.param("sqlite_str", marks=pytest.mark.db), + "sqlite_engine", + "sqlite_conn", + "sqlite_str", +] + +sqlite_connectable_iris = [ + "sqlite_engine_iris", + "sqlite_conn_iris", + "sqlite_str_iris", ] -sqlite_iris_connectable = [ - pytest.param("sqlite_iris_engine", marks=pytest.mark.db), - pytest.param("sqlite_iris_conn", marks=pytest.mark.db), - pytest.param("sqlite_iris_str", marks=pytest.mark.db), +sqlite_connectable_types = [ + "sqlite_engine_types", + "sqlite_conn_types", + "sqlite_str_types", ] sqlalchemy_connectable = mysql_connectable + postgresql_connectable + sqlite_connectable sqlalchemy_connectable_iris = ( - mysql_connectable + postgresql_connectable + sqlite_iris_connectable + mysql_connectable_iris + postgresql_connectable_iris + sqlite_connectable_iris ) -all_connectable = sqlalchemy_connectable + [ - "sqlite_buildin", - "sqlite_sqlalchemy_memory_engine", -] +sqlalchemy_connectable_types = ( + mysql_connectable_types + postgresql_connectable_types + sqlite_connectable_types +) -all_connectable_iris = sqlalchemy_connectable_iris + [ - "sqlite_buildin_iris", - "sqlite_sqlalchemy_memory_engine", -] +all_connectable = sqlalchemy_connectable + ["sqlite_buildin"] + +all_connectable_iris = sqlalchemy_connectable_iris + ["sqlite_buildin_iris"] + +all_connectable_types = sqlalchemy_connectable_types + ["sqlite_buildin_types"] @pytest.mark.parametrize("conn", all_connectable) @@ -813,10 +896,10 @@ def sample(pd_table, conn, keys, data_iter): assert count_rows(conn, "test_frame") == len(test_frame1) -@pytest.mark.parametrize("conn", all_connectable_iris) +@pytest.mark.parametrize("conn", all_connectable_types) def test_default_type_conversion(conn, request): conn_name = conn - if conn_name == "sqlite_buildin_iris": + if conn_name == "sqlite_buildin_types": request.applymarker( pytest.mark.xfail( reason="sqlite_buildin connection does not implement read_sql_table" @@ -1093,43 +1176,39 @@ def test_read_view_sqlite(sqlite_buildin): tm.assert_frame_equal(result, expected) -def test_execute_typeerror(sqlite_iris_engine): +def test_execute_typeerror(sqlite_engine_iris): with pytest.raises(TypeError, match="pandas.io.sql.execute requires a connection"): with tm.assert_produces_warning( FutureWarning, match="`pandas.io.sql.execute` is deprecated and " "will be removed in the future version.", ): - sql.execute("select * from iris", sqlite_iris_engine) + sql.execute("select * from iris", sqlite_engine_iris) -def test_execute_deprecated(sqlite_buildin_iris): 
+def test_execute_deprecated(sqlite_conn_iris): # GH50185 with tm.assert_produces_warning( FutureWarning, match="`pandas.io.sql.execute` is deprecated and " "will be removed in the future version.", ): - sql.execute("select * from iris", sqlite_buildin_iris) + sql.execute("select * from iris", sqlite_conn_iris) -@pytest.fixture -def flavor(): - def func(conn_name): - if "postgresql" in conn_name: - return "postgresql" - elif "sqlite" in conn_name: - return "sqlite" - elif "mysql" in conn_name: - return "mysql" - - raise ValueError(f"unsupported connection: {conn_name}") +def flavor(conn_name): + if "postgresql" in conn_name: + return "postgresql" + elif "sqlite" in conn_name: + return "sqlite" + elif "mysql" in conn_name: + return "mysql" - return func + raise ValueError(f"unsupported connection: {conn_name}") @pytest.mark.parametrize("conn", all_connectable_iris) -def test_read_sql_iris_parameter(conn, request, sql_strings, flavor): +def test_read_sql_iris_parameter(conn, request, sql_strings): conn_name = conn conn = request.getfixturevalue(conn) query = sql_strings["read_parameters"][flavor(conn_name)] @@ -1141,19 +1220,19 @@ def test_read_sql_iris_parameter(conn, request, sql_strings, flavor): @pytest.mark.parametrize("conn", all_connectable_iris) -def test_read_sql_iris_named_parameter(conn, request, sql_strings, flavor): +def test_read_sql_iris_named_parameter(conn, request, sql_strings): conn_name = conn conn = request.getfixturevalue(conn) query = sql_strings["read_named_parameters"][flavor(conn_name)] params = {"name": "Iris-setosa", "length": 5.1} - pandasSQL = pandasSQL_builder(conn) - with pandasSQL.run_transaction(): - iris_frame = pandasSQL.read_query(query, params=params) + with pandasSQL_builder(conn) as pandasSQL: + with pandasSQL.run_transaction(): + iris_frame = pandasSQL.read_query(query, params=params) check_iris_frame(iris_frame) @pytest.mark.parametrize("conn", all_connectable_iris) -def test_read_sql_iris_no_parameter_with_percent(conn, request, sql_strings, flavor): +def test_read_sql_iris_no_parameter_with_percent(conn, request, sql_strings): if "mysql" in conn or "postgresql" in conn: request.applymarker(pytest.mark.xfail(reason="broken test")) @@ -1322,7 +1401,7 @@ def test_api_execute_sql(conn, request): tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"]) -@pytest.mark.parametrize("conn", all_connectable_iris) +@pytest.mark.parametrize("conn", all_connectable_types) def test_api_date_parsing(conn, request): conn_name = conn conn = request.getfixturevalue(conn) @@ -1378,7 +1457,7 @@ def test_api_date_parsing(conn, request): ] -@pytest.mark.parametrize("conn", all_connectable_iris) +@pytest.mark.parametrize("conn", all_connectable_types) @pytest.mark.parametrize("error", ["ignore", "raise", "coerce"]) @pytest.mark.parametrize( "read_sql, text, mode", @@ -1398,7 +1477,7 @@ def test_api_custom_dateparsing_error( ): conn_name = conn conn = request.getfixturevalue(conn) - if text == "types" and conn_name == "sqlite_buildin_iris": + if text == "types" and conn_name == "sqlite_buildin_types": request.applymarker( pytest.mark.xfail(reason="failing combination of arguments") ) @@ -1414,14 +1493,13 @@ def test_api_custom_dateparsing_error( ) if "postgres" in conn_name: # TODO: clean up types_data_frame fixture - result = result.drop(columns=["DateColWithTz"]) result["BoolCol"] = result["BoolCol"].astype(int) result["BoolColWithNull"] = result["BoolColWithNull"].astype(float) tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("conn", 
all_connectable_iris) +@pytest.mark.parametrize("conn", all_connectable_types) def test_api_date_and_index(conn, request): # Test case where same column appears in parse_date and index_col conn = request.getfixturevalue(conn) @@ -2022,7 +2100,7 @@ def test_query_by_select_obj(conn, request): select, ) - iris = iris_table_metadata("postgres") + iris = iris_table_metadata() name_select = select(iris).where(iris.c.Name == bindparam("name")) iris_df = sql.read_sql(name_select, conn, params={"name": "Iris-setosa"}) all_names = set(iris_df["Name"]) @@ -2188,7 +2266,6 @@ def test_roundtrip(conn, request, test_frame1): @pytest.mark.parametrize("conn", all_connectable_iris) def test_execute_sql(conn, request): conn = request.getfixturevalue(conn) - pandasSQL = pandasSQL_builder(conn) with pandasSQL_builder(conn) as pandasSQL: with pandasSQL.run_transaction(): iris_results = pandasSQL.execute("SELECT * FROM iris") @@ -2220,7 +2297,7 @@ def test_read_table_absent_raises(conn, request): sql.read_sql_table("this_doesnt_exist", con=conn) -@pytest.mark.parametrize("conn", sqlalchemy_connectable) +@pytest.mark.parametrize("conn", sqlalchemy_connectable_types) def test_sqlalchemy_default_type_conversion(conn, request): conn_name = conn if conn_name == "sqlite_str": @@ -2254,7 +2331,7 @@ def test_bigint(conn, request): tm.assert_frame_equal(df, result) -@pytest.mark.parametrize("conn", sqlalchemy_connectable) +@pytest.mark.parametrize("conn", sqlalchemy_connectable_types) def test_default_date_load(conn, request): conn_name = conn if conn_name == "sqlite_str": @@ -2270,82 +2347,40 @@ def test_default_date_load(conn, request): assert issubclass(df.DateCol.dtype.type, np.datetime64) -@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) -def test_datetime_with_timezone(conn, request): +@pytest.mark.parametrize("conn", postgresql_connectable) +@pytest.mark.parametrize("parse_dates", [None, ["DateColWithTz"]]) +def test_datetime_with_timezone_query(conn, request, parse_dates): # edge case that converts postgresql datetime with time zone types # to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok # but should be more natural, so coerce to datetime64[ns] for now - - def check(col): - # check that a column is either datetime64[ns] - # or datetime64[ns, UTC] - if lib.is_np_dtype(col.dtype, "M"): - # "2000-01-01 00:00:00-08:00" should convert to - # "2000-01-01 08:00:00" - assert col[0] == Timestamp("2000-01-01 08:00:00") - - # "2000-06-01 00:00:00-07:00" should convert to - # "2000-06-01 07:00:00" - assert col[1] == Timestamp("2000-06-01 07:00:00") - - elif isinstance(col.dtype, DatetimeTZDtype): - assert str(col.dt.tz) == "UTC" - - # "2000-01-01 00:00:00-08:00" should convert to - # "2000-01-01 08:00:00" - # "2000-06-01 00:00:00-07:00" should convert to - # "2000-06-01 07:00:00" - # GH 6415 - expected_data = [ - Timestamp("2000-01-01 08:00:00", tz="UTC"), - Timestamp("2000-06-01 07:00:00", tz="UTC"), - ] - expected = Series(expected_data, name=col.name) - tm.assert_series_equal(col, expected) - - else: - raise AssertionError(f"DateCol loaded with incorrect type -> {col.dtype}") - - # GH11216 conn = request.getfixturevalue(conn) - df = read_sql_query("select * from types", conn) - if not hasattr(df, "DateColWithTz"): - request.applymarker( - pytest.mark.xfail(reason="no column with datetime with time zone") - ) + expected = create_and_load_postgres_datetz(conn) - # this is parsed on Travis (linux), but not on macosx for some reason - # even with the same versions of psycopg2 & sqlalchemy, 
possibly a - # Postgresql server version difference + # GH11216 + df = read_sql_query("select * from datetz", conn, parse_dates=parse_dates) col = df.DateColWithTz - assert isinstance(col.dtype, DatetimeTZDtype) + tm.assert_series_equal(col, expected) - df = read_sql_query("select * from types", conn, parse_dates=["DateColWithTz"]) - if not hasattr(df, "DateColWithTz"): - request.applymarker( - pytest.mark.xfail(reason="no column with datetime with time zone") - ) - col = df.DateColWithTz - assert isinstance(col.dtype, DatetimeTZDtype) - assert str(col.dt.tz) == "UTC" - check(df.DateColWithTz) + +@pytest.mark.parametrize("conn", postgresql_connectable) +def test_datetime_with_timezone_query_chunksize(conn, request): + conn = request.getfixturevalue(conn) + expected = create_and_load_postgres_datetz(conn) df = concat( - list(read_sql_query("select * from types", conn, chunksize=1)), + list(read_sql_query("select * from datetz", conn, chunksize=1)), ignore_index=True, ) col = df.DateColWithTz - assert isinstance(col.dtype, DatetimeTZDtype) - assert str(col.dt.tz) == "UTC" - expected = sql.read_sql_table("types", conn) - col = expected.DateColWithTz - assert isinstance(col.dtype, DatetimeTZDtype) - tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz) - - # xref #7139 - # this might or might not be converted depending on the postgres driver - df = sql.read_sql_table("types", conn) - check(df.DateColWithTz) + tm.assert_series_equal(col, expected) + + +@pytest.mark.parametrize("conn", postgresql_connectable) +def test_datetime_with_timezone_table(conn, request): + conn = request.getfixturevalue(conn) + expected = create_and_load_postgres_datetz(conn) + result = sql.read_sql_table("datetz", conn) + tm.assert_frame_equal(result, expected.to_frame()) @pytest.mark.parametrize("conn", sqlalchemy_connectable) @@ -2403,7 +2438,7 @@ def test_naive_datetimeindex_roundtrip(conn, request): tm.assert_frame_equal(result, expected, check_names=False) -@pytest.mark.parametrize("conn", sqlalchemy_connectable_iris) +@pytest.mark.parametrize("conn", sqlalchemy_connectable_types) def test_date_parsing(conn, request): # No Parsing conn_name = conn @@ -3235,8 +3270,8 @@ def test_read_sql_dtype(conn, request, func, dtype_backend): tm.assert_frame_equal(result, expected) -def test_keyword_deprecation(sqlite_sqlalchemy_memory_engine): - conn = sqlite_sqlalchemy_memory_engine +def test_keyword_deprecation(sqlite_engine): + conn = sqlite_engine # GH 54397 msg = ( "Starting with pandas version 3.0 all arguments of to_sql except for the " @@ -3249,8 +3284,8 @@ def test_keyword_deprecation(sqlite_sqlalchemy_memory_engine): df.to_sql("example", conn, None, if_exists="replace") -def test_bigint_warning(sqlite_sqlalchemy_memory_engine): - conn = sqlite_sqlalchemy_memory_engine +def test_bigint_warning(sqlite_engine): + conn = sqlite_engine # test no warning for BIGINT (to support int64) is raised (GH7433) df = DataFrame({"a": [1, 2]}, dtype="int64") assert df.to_sql(name="test_bigintwarning", con=conn, index=False) == 2 @@ -3259,15 +3294,15 @@ def test_bigint_warning(sqlite_sqlalchemy_memory_engine): sql.read_sql_table("test_bigintwarning", conn) -def test_valueerror_exception(sqlite_sqlalchemy_memory_engine): - conn = sqlite_sqlalchemy_memory_engine +def test_valueerror_exception(sqlite_engine): + conn = sqlite_engine df = DataFrame({"col1": [1, 2], "col2": [3, 4]}) with pytest.raises(ValueError, match="Empty table name specified"): df.to_sql(name="", con=conn, if_exists="replace", index=False) -def 
test_row_object_is_named_tuple(sqlite_sqlalchemy_memory_engine): - conn = sqlite_sqlalchemy_memory_engine +def test_row_object_is_named_tuple(sqlite_engine): + conn = sqlite_engine # GH 40682 # Test for the is_named_tuple() function # Placed here due to its usage of sqlalchemy @@ -3305,8 +3340,8 @@ class Test(BaseModel): assert list(df.columns) == ["id", "string_column"] -def test_read_sql_string_inference(sqlite_sqlalchemy_memory_engine): - conn = sqlite_sqlalchemy_memory_engine +def test_read_sql_string_inference(sqlite_engine): + conn = sqlite_engine # GH#54430 pytest.importorskip("pyarrow") table = "test" @@ -3324,8 +3359,8 @@ def test_read_sql_string_inference(sqlite_sqlalchemy_memory_engine): tm.assert_frame_equal(result, expected) -def test_roundtripping_datetimes(sqlite_sqlalchemy_memory_engine): - conn = sqlite_sqlalchemy_memory_engine +def test_roundtripping_datetimes(sqlite_engine): + conn = sqlite_engine # GH#54877 df = DataFrame({"t": [datetime(2020, 12, 31, 12)]}, dtype="datetime64[ns]") df.to_sql("test", conn, if_exists="replace", index=False) @@ -3444,8 +3479,8 @@ def test_self_join_date_columns(postgresql_psycopg2_engine): pandasSQL.drop_table("person") -def test_create_and_drop_table(sqlite_sqlalchemy_memory_engine): - conn = sqlite_sqlalchemy_memory_engine +def test_create_and_drop_table(sqlite_engine): + conn = sqlite_engine temp_frame = DataFrame({"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}) with sql.SQLDatabase(conn) as pandasSQL: with pandasSQL.run_transaction(): @@ -3568,24 +3603,18 @@ def test_sqlite_illegal_names(sqlite_buildin): sql.table_exists(c_tbl, conn) -# ----------------------------------------------------------------------------- -# -- Old tests from 0.13.1 (before refactor using sqlalchemy) - - -_formatters = { - datetime: "'{}'".format, - str: "'{}'".format, - np.str_: "'{}'".format, - bytes: "'{}'".format, - float: "{:.8f}".format, - int: "{:d}".format, - type(None): lambda x: "NULL", - np.float64: "{:.10f}".format, - bool: "'{!s}'".format, -} - - def format_query(sql, *args): + _formatters = { + datetime: "'{}'".format, + str: "'{}'".format, + np.str_: "'{}'".format, + bytes: "'{}'".format, + float: "{:.8f}".format, + int: "{:d}".format, + type(None): lambda x: "NULL", + np.float64: "{:.10f}".format, + bool: "'{!s}'".format, + } processed_args = [] for arg in args: if isinstance(arg, float) and isna(arg):
Also factors the timezone data out into a fixture so that it only applies to the single test where it's used
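The split-out tests each call a `create_and_load_postgres_datetz(conn)` helper that seeds a dedicated `datetz` table and returns the expected UTC series. The helper itself is outside this hunk; below is a minimal sketch of what it could look like, assuming a SQLAlchemy engine and the same two fixed-offset rows the old inline checks used — a hypothetical reconstruction, not the PR's exact code.

```python
import pandas as pd
import sqlalchemy as sa


def create_and_load_postgres_datetz(engine) -> pd.Series:
    # Hypothetical sketch: create a one-column "datetz" table, insert two
    # tz-aware rows, and return the Series the tests compare against
    # (the fixed offsets normalize to these UTC timestamps).
    metadata = sa.MetaData()
    datetz = sa.Table(
        "datetz", metadata, sa.Column("DateColWithTz", sa.DateTime(timezone=True))
    )
    rows = [
        {"DateColWithTz": "2000-01-01 00:00:00-08:00"},
        {"DateColWithTz": "2000-06-01 00:00:00-07:00"},
    ]
    with engine.begin() as conn:
        datetz.drop(conn, checkfirst=True)
        datetz.create(conn)
        conn.execute(datetz.insert(), rows)
    expected = [
        pd.Timestamp("2000-01-01 08:00:00", tz="UTC"),
        pd.Timestamp("2000-06-01 07:00:00", tz="UTC"),
    ]
    return pd.Series(expected, name="DateColWithTz")
```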
https://api.github.com/repos/pandas-dev/pandas/pulls/55265
2023-09-24T16:52:18Z
2023-10-25T17:18:06Z
2023-10-25T17:18:06Z
2023-10-25T17:18:10Z
TYP: Misc changes for pandas-stubs; use Protocol to avoid str in Sequence
diff --git a/pandas/_typing.py b/pandas/_typing.py index c2bbebfbe2857..0e2a0881f0122 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -24,6 +24,7 @@ Type as type_t, TypeVar, Union, + overload, ) import numpy as np @@ -85,6 +86,8 @@ # Name "npt._ArrayLikeInt_co" is not defined [name-defined] NumpySorter = Optional[npt._ArrayLikeInt_co] # type: ignore[name-defined] + from typing import SupportsIndex + if sys.version_info >= (3, 10): from typing import TypeGuard # pyright: ignore[reportUnusedImport] else: @@ -109,10 +112,40 @@ # list-like -# Cannot use `Sequence` because a string is a sequence, and we don't want to -# accept that. Could refine if https://github.com/python/typing/issues/256 is -# resolved to differentiate between Sequence[str] and str -ListLike = Union[AnyArrayLike, list, tuple, range] +# from https://github.com/hauntsaninja/useful_types +# includes Sequence-like objects but excludes str and bytes +_T_co = TypeVar("_T_co", covariant=True) + + +class SequenceNotStr(Protocol[_T_co]): + @overload + def __getitem__(self, index: SupportsIndex, /) -> _T_co: + ... + + @overload + def __getitem__(self, index: slice, /) -> Sequence[_T_co]: + ... + + def __contains__(self, value: object, /) -> bool: + ... + + def __len__(self) -> int: + ... + + def __iter__(self) -> Iterator[_T_co]: + ... + + def index(self, value: Any, /, start: int = 0, stop: int = ...) -> int: + ... + + def count(self, value: Any, /) -> int: + ... + + def __reversed__(self) -> Iterator[_T_co]: + ... + + +ListLike = Union[AnyArrayLike, SequenceNotStr, range] # scalars @@ -120,7 +153,7 @@ DatetimeLikeScalar = Union["Period", "Timestamp", "Timedelta"] PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"] Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, date] -IntStrT = TypeVar("IntStrT", int, str) +IntStrT = TypeVar("IntStrT", bound=Union[int, str]) # timestamp and timedelta convertible types diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3e32a6d93b023..432c0a745c7a0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -240,6 +240,7 @@ Renamer, Scalar, Self, + SequenceNotStr, SortKind, StorageOptions, Suffixes, @@ -1187,7 +1188,7 @@ def to_string( buf: None = ..., columns: Axes | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., - header: bool | list[str] = ..., + header: bool | SequenceNotStr[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., @@ -1212,7 +1213,7 @@ def to_string( buf: FilePath | WriteBuffer[str], columns: Axes | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., - header: bool | list[str] = ..., + header: bool | SequenceNotStr[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., @@ -1250,7 +1251,7 @@ def to_string( buf: FilePath | WriteBuffer[str] | None = None, columns: Axes | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, - header: bool | list[str] = True, + header: bool | SequenceNotStr[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, @@ -10563,9 +10564,9 @@ def merge( self, right: DataFrame | Series, how: MergeHow = "inner", - on: IndexLabel | None = None, - left_on: IndexLabel | None = None, - right_on: IndexLabel | None = None, + on: IndexLabel | AnyArrayLike | None = None, + left_on: IndexLabel | AnyArrayLike | None = None, + right_on: IndexLabel | AnyArrayLike | None = None, left_index: bool = False, 
right_index: bool = False, sort: bool = False, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 427687d9614f9..738f4cbe6bc43 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -72,6 +72,7 @@ Renamer, Scalar, Self, + SequenceNotStr, SortKind, StorageOptions, Suffixes, @@ -3273,7 +3274,7 @@ def to_latex( self, buf: None = ..., columns: Sequence[Hashable] | None = ..., - header: bool_t | list[str] = ..., + header: bool_t | SequenceNotStr[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., @@ -3300,7 +3301,7 @@ def to_latex( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Hashable] | None = ..., - header: bool_t | list[str] = ..., + header: bool_t | SequenceNotStr[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., @@ -3330,7 +3331,7 @@ def to_latex( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Hashable] | None = None, - header: bool_t | list[str] = True, + header: bool_t | SequenceNotStr[str] = True, index: bool_t = True, na_rep: str = "NaN", formatters: FormattersType | None = None, diff --git a/pandas/core/methods/describe.py b/pandas/core/methods/describe.py index 5bb6bebd8a87b..dcdf0067d45b0 100644 --- a/pandas/core/methods/describe.py +++ b/pandas/core/methods/describe.py @@ -301,7 +301,7 @@ def describe_timestamp_as_categorical_1d( names = ["count", "unique"] objcounts = data.value_counts() count_unique = len(objcounts[objcounts != 0]) - result = [data.count(), count_unique] + result: list[float | Timestamp] = [data.count(), count_unique] dtype = None if count_unique > 0: top, freq = objcounts.index[0], objcounts.iloc[0] diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 30d654078bd05..b6323e8c8b5f9 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1541,7 +1541,7 @@ def count(self): return result - def quantile(self, q: float | AnyArrayLike = 0.5, **kwargs): + def quantile(self, q: float | list[float] | AnyArrayLike = 0.5, **kwargs): """ Return value at the given quantile. 
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 6d1ff07e07c76..4b9fcc80af4bb 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -138,9 +138,9 @@ def merge( left: DataFrame | Series, right: DataFrame | Series, how: MergeHow = "inner", - on: IndexLabel | None = None, - left_on: IndexLabel | None = None, - right_on: IndexLabel | None = None, + on: IndexLabel | AnyArrayLike | None = None, + left_on: IndexLabel | AnyArrayLike | None = None, + right_on: IndexLabel | AnyArrayLike | None = None, left_index: bool = False, right_index: bool = False, sort: bool = False, @@ -187,9 +187,9 @@ def merge( def _cross_merge( left: DataFrame, right: DataFrame, - on: IndexLabel | None = None, - left_on: IndexLabel | None = None, - right_on: IndexLabel | None = None, + on: IndexLabel | AnyArrayLike | None = None, + left_on: IndexLabel | AnyArrayLike | None = None, + right_on: IndexLabel | AnyArrayLike | None = None, left_index: bool = False, right_index: bool = False, sort: bool = False, @@ -239,7 +239,9 @@ def _cross_merge( return res -def _groupby_and_merge(by, left: DataFrame, right: DataFrame, merge_pieces): +def _groupby_and_merge( + by, left: DataFrame | Series, right: DataFrame | Series, merge_pieces +): """ groupby & merge; we are always performing a left-by type operation @@ -255,7 +257,7 @@ def _groupby_and_merge(by, left: DataFrame, right: DataFrame, merge_pieces): by = [by] lby = left.groupby(by, sort=False) - rby: groupby.DataFrameGroupBy | None = None + rby: groupby.DataFrameGroupBy | groupby.SeriesGroupBy | None = None # if we can groupby the rhs # then we can get vastly better perf @@ -295,8 +297,8 @@ def _groupby_and_merge(by, left: DataFrame, right: DataFrame, merge_pieces): def merge_ordered( - left: DataFrame, - right: DataFrame, + left: DataFrame | Series, + right: DataFrame | Series, on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, @@ -737,9 +739,9 @@ def __init__( left: DataFrame | Series, right: DataFrame | Series, how: MergeHow | Literal["asof"] = "inner", - on: IndexLabel | None = None, - left_on: IndexLabel | None = None, - right_on: IndexLabel | None = None, + on: IndexLabel | AnyArrayLike | None = None, + left_on: IndexLabel | AnyArrayLike | None = None, + right_on: IndexLabel | AnyArrayLike | None = None, left_index: bool = False, right_index: bool = False, sort: bool = True, diff --git a/pandas/core/series.py b/pandas/core/series.py index d3a2bb1745cd1..fd50a85f3c2e3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2141,7 +2141,7 @@ def groupby( # Statistics, overridden ndarray methods # TODO: integrate bottleneck - def count(self): + def count(self) -> int: """ Return number of non-NA/null observations in the Series. 
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 8d0edd88ffb6c..569c8aaf6cef1 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -21,6 +21,7 @@ import numpy as np from pandas._libs import writers as libwriters +from pandas._typing import SequenceNotStr from pandas.util._decorators import cache_readonly from pandas.core.dtypes.generic import ( @@ -109,7 +110,7 @@ def decimal(self) -> str: return self.fmt.decimal @property - def header(self) -> bool | list[str]: + def header(self) -> bool | SequenceNotStr[str]: return self.fmt.header @property @@ -213,7 +214,7 @@ def _need_to_save_header(self) -> bool: return bool(self._has_aliases or self.header) @property - def write_cols(self) -> Sequence[Hashable]: + def write_cols(self) -> SequenceNotStr[Hashable]: if self._has_aliases: assert not isinstance(self.header, bool) if len(self.header) != len(self.cols): @@ -224,7 +225,7 @@ def write_cols(self) -> Sequence[Hashable]: else: # self.cols is an ndarray derived from Index._format_native_types, # so its entries are strings, i.e. hashable - return cast(Sequence[Hashable], self.cols) + return cast(SequenceNotStr[Hashable], self.cols) @property def encoded_labels(self) -> list[Hashable]: diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 2297f7945a264..922d0f37bee3a 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -105,6 +105,7 @@ FloatFormatType, FormattersType, IndexLabel, + SequenceNotStr, StorageOptions, WriteBuffer, ) @@ -566,7 +567,7 @@ def __init__( frame: DataFrame, columns: Axes | None = None, col_space: ColspaceArgType | None = None, - header: bool | list[str] = True, + header: bool | SequenceNotStr[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index f015c9efe7122..e1839fc1b0a67 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -3161,8 +3161,6 @@ def dtype_backend_data() -> DataFrame: @pytest.fixture def dtype_backend_expected(): def func(storage, dtype_backend, conn_name): - string_array: StringArray | ArrowStringArray - string_array_na: StringArray | ArrowStringArray if storage == "python": string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_)) string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_))
The first commit addresses some type issues found by running mypy on the tests from pandas-stubs (one big issue is that Index/Series are not included in IndexLabel; there may be cases where that is correct, but in many cases they should be included). The second commit rewrites #47233 using the protocol version of Sequence. I can split this into two PRs if either one is controversial.
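For context on the Protocol approach: a plain `Sequence[str]` annotation also accepts a bare `str` (a string is itself a sequence of strings), which is exactly what parameters like `header` want to rule out. `SequenceNotStr` excludes it structurally — `str.__contains__` only accepts `str`, while the protocol requires `__contains__(value: object)`, so type checkers reject bare strings. A small, hypothetical illustration (checked by mypy; nothing happens at runtime):

```python
from __future__ import annotations

from pandas._typing import SequenceNotStr  # private module, per this diff


def set_header(header: bool | SequenceNotStr[str]) -> None:
    ...


set_header(["a", "b"])  # OK: list[str] satisfies the protocol
set_header(("a", "b"))  # OK: tuple[str, ...] does too
set_header("ab")        # mypy error: str fails the structural check
```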
https://api.github.com/repos/pandas-dev/pandas/pulls/55263
2023-09-24T14:25:51Z
2023-09-26T15:48:56Z
2023-09-26T15:48:56Z
2023-12-10T04:33:43Z
CLN: Remove temp_setattr from groupby.transform
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index d607baf18d6cb..a022bfd1bd9bc 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2012,15 +2012,13 @@ def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): # If func is a reduction, we need to broadcast the # result to the whole group. Compute func result # and deal with possible broadcasting below. - # Temporarily set observed for dealing with categoricals. - with com.temp_setattr(self, "observed", True): - with com.temp_setattr(self, "as_index", True): - # GH#49834 - result needs groups in the index for - # _wrap_transform_fast_result - if engine is not None: - kwargs["engine"] = engine - kwargs["engine_kwargs"] = engine_kwargs - result = getattr(self, func)(*args, **kwargs) + with com.temp_setattr(self, "as_index", True): + # GH#49834 - result needs groups in the index for + # _wrap_transform_fast_result + if engine is not None: + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + result = getattr(self, func)(*args, **kwargs) return self._wrap_transform_fast_result(result)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Using temp_setattr here doesn't actually change anything: all of the objects that `observed` affects are cached and have already been computed by the time the transform runs.
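For reference, `com.temp_setattr` is just a small context manager that swaps an attribute in and restores it on exit — roughly like this sketch of `pandas.core.common.temp_setattr`:

```python
from contextlib import contextmanager


@contextmanager
def temp_setattr(obj, attr: str, value):
    # Approximate sketch: stash the old attribute value, install the new
    # one for the duration of the block, and restore it on exit.
    old_value = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        yield obj
    finally:
        setattr(obj, attr, old_value)
```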
https://api.github.com/repos/pandas-dev/pandas/pulls/55262
2023-09-24T13:09:08Z
2023-09-24T17:21:30Z
2023-09-24T17:21:30Z
2023-09-24T17:21:36Z
Fix value_counts for sort=False parameter
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 1d74bb8b83e4e..860a9d8951fd5 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -871,6 +871,7 @@ def value_counts_internal( Index, Series, ) + index_name = getattr(values, "name", None) name = "proportion" if normalize else "count" @@ -903,7 +904,7 @@ def value_counts_internal( else: if is_extension_array_dtype(values): # handle Categorical and sparse, - result = Series(values, copy=False)._values.value_counts(dropna=dropna) + result = Series(values, copy=False)._values.value_counts(dropna=dropna, sort=sort) result.name = name result.index.name = index_name counts = result._values diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 8d2633c10b428..8f84ba3e87f4a 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1753,7 +1753,7 @@ def notna(self) -> npt.NDArray[np.bool_]: notnull = notna - def value_counts(self, dropna: bool = True) -> Series: + def value_counts(self, dropna: bool = True, sort: bool = True) -> Series: """ Return a Series containing counts of each category. @@ -1763,6 +1763,9 @@ def value_counts(self, dropna: bool = True) -> Series: ---------- dropna : bool, default True Don't include counts of NaN. + sort : bool, default True + Present for only API compatibility. For categorical + data, sorting does not have any effect on the output. Returns ------- diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index fdcbe67bbc371..27c70d99fd284 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -1021,7 +1021,7 @@ def factorize( def _values_for_argsort(self) -> np.ndarray: return self._data - def value_counts(self, dropna: bool = True) -> Series: + def value_counts(self, dropna: bool = True, sort: bool = True) -> Series: """ Returns a Series containing counts of each unique value. @@ -1029,6 +1029,8 @@ def value_counts(self, dropna: bool = True) -> Series: ---------- dropna : bool, default True Don't include counts of missing values. + sort: bool, default True + Present for only API compatibility. 
Returns ------- diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 693ebad0ca16f..d60622de2a107 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -520,10 +520,10 @@ def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar: ) return self._wrap_reduction_result(axis, result) - def value_counts(self, dropna: bool = True) -> Series: + def value_counts(self, dropna: bool = True, sort: bool = True) -> Series: from pandas.core.algorithms import value_counts_internal as value_counts - result = value_counts(self._ndarray, dropna=dropna).astype("Int64") + result = value_counts(self._ndarray, dropna=dropna, sort=sort).astype("Int64") result.index = result.index.astype(self.dtype) return result diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 661290fb00d13..e988249419fbe 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1282,6 +1282,16 @@ def test_categorical(self): result = s.value_counts() expected.index = expected.index.as_ordered() tm.assert_series_equal(result, expected, check_index_type=True) + + def test_value_counts_gh55224(self): + s = Series(data=["a", "b", "c", "b"]) + result = s.astype("string").value_counts(sort=False) + expected = Series( + data=pd.array([1, 2, 1], dtype=pd.Int64Dtype()), + index=pd.array(["a", "b", "c"], dtype="string"), + name="count", + ) + tm.assert_series_equal(result, expected) def test_categorical_nans(self): s = Series(Categorical(list("aaaaabbbcc"))) # 4,3,2,1 (nan)
- [x] closes #55224 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
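Illustration of the intended behavior (expected output per the new `test_value_counts_gh55224`):

```python
import pandas as pd

s = pd.Series(["a", "b", "c", "b"], dtype="string")
print(s.value_counts(sort=False))
# With the fix, counts come back in order of first appearance instead of
# being sorted by descending frequency:
# a    1
# b    2
# c    1
# Name: count, dtype: Int64
```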
https://api.github.com/repos/pandas-dev/pandas/pulls/55260
2023-09-24T07:32:20Z
2023-12-18T19:34:38Z
null
2023-12-18T19:34:38Z
BUG: DatetimeIndex.union returning object dtype for indexes with the same tz but different units
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 930e03ae7d75a..ca09a69f603ab 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -259,7 +259,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ -- +- Bug in :meth:`DatetimeIndex.union` returning object dtype for tz-aware indexes with the same timezone but different units (:issue:`55238`) - Timedelta diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index beff71d5e9dd9..196c95c3673e9 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -928,6 +928,13 @@ def __setstate__(self, state) -> None: self._tz = state["tz"] self._unit = state["unit"] + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + if all(isinstance(t, DatetimeTZDtype) and t.tz == self.tz for t in dtypes): + np_dtype = np.max([cast(DatetimeTZDtype, t).base for t in [self, *dtypes]]) + unit = np.datetime_data(np_dtype)[0] + return type(self)(unit=unit, tz=self.tz) + return super()._get_common_dtype(dtypes) + @cache_readonly def index_class(self) -> type_t[DatetimeIndex]: from pandas import DatetimeIndex diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index b56bad7f2e833..ca784948a5d29 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -189,6 +189,14 @@ def test_union_with_DatetimeIndex(self, sort): # Fails with "AttributeError: can't set attribute" i2.union(i1, sort=sort) + def test_union_same_timezone_different_units(self): + # GH 55238 + idx1 = date_range("2000-01-01", periods=3, tz="UTC").as_unit("ms") + idx2 = date_range("2000-01-01", periods=3, tz="UTC").as_unit("us") + result = idx1.union(idx2) + expected = date_range("2000-01-01", periods=3, tz="UTC").as_unit("us") + tm.assert_index_equal(result, expected) + # TODO: moved from test_datetimelike; de-duplicate with version below def test_intersection2(self): first = tm.makeDateIndex(10)
- [x] closes #55238 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v2.2.0.rst` file if fixing a bug or adding a new feature.
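In user terms, two tz-aware operands with the same timezone now resolve to the finer of the two units (the `np.max` over the base numpy dtypes picks `us` over `ms`) instead of falling back to object dtype:

```python
import pandas as pd

idx_ms = pd.date_range("2000-01-01", periods=3, tz="UTC").as_unit("ms")
idx_us = pd.date_range("2000-01-01", periods=3, tz="UTC").as_unit("us")

result = idx_ms.union(idx_us)
print(result.dtype)  # datetime64[us, UTC] with the fix; object before
```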
https://api.github.com/repos/pandas-dev/pandas/pulls/55259
2023-09-24T01:13:41Z
2023-09-25T18:42:45Z
2023-09-25T18:42:45Z
2023-11-16T12:56:53Z
BUG: Series[slc]=foo raising with IntervalIndex
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 930e03ae7d75a..38a1c1511af73 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -292,6 +292,7 @@ Interval - Bug in :class:`Interval` ``__repr__`` not displaying UTC offsets for :class:`Timestamp` bounds. Additionally the hour, minute and second components will now be shown. (:issue:`55015`) - Bug in :meth:`IntervalIndex.get_indexer` with datetime or timedelta intervals incorrectly matching on integer targets (:issue:`47772`) - Bug in :meth:`IntervalIndex.get_indexer` with timezone-aware datetime intervals incorrectly matching on a sequence of timezone-naive targets (:issue:`47772`) +- Bug in setting values on a :class:`Series` with an :class:`IntervalIndex` using a slice incorrectly raising (:issue:`54722`) - Indexing diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 8703fef1e5940..e23887159c9c6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4204,15 +4204,9 @@ def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]): self._validate_indexer("slice", key.step, "getitem") return key - # convert the slice to an indexer here - - # special case for interval_dtype bc we do not do partial-indexing - # on integer Intervals when slicing - # TODO: write this in terms of e.g. should_partial_index? - ints_are_positional = self._should_fallback_to_positional or isinstance( - self.dtype, IntervalDtype - ) - is_positional = is_index_slice and ints_are_positional + # convert the slice to an indexer here; checking that the user didn't + # pass a positional slice to loc + is_positional = is_index_slice and self._should_fallback_to_positional # if we are mixed and have integers if is_positional: diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py index 52a1d433712ff..ae25724972fde 100644 --- a/pandas/tests/indexing/interval/test_interval.py +++ b/pandas/tests/indexing/interval/test_interval.py @@ -134,6 +134,33 @@ def test_getitem_interval_with_nans(self, frame_or_series, indexer_sl): tm.assert_equal(result, expected) + def test_setitem_interval_with_slice(self): + # GH#54722 + ii = IntervalIndex.from_breaks(range(4, 15)) + ser = Series(range(10), index=ii) + + orig = ser.copy() + + # This should be a no-op (used to raise) + ser.loc[1:3] = 20 + tm.assert_series_equal(ser, orig) + + ser.loc[6:8] = 19 + orig.iloc[1:4] = 19 + tm.assert_series_equal(ser, orig) + + ser2 = Series(range(5), index=ii[::2]) + orig2 = ser2.copy() + + # this used to raise + ser2.loc[6:8] = 22 # <- raises on main, sets on branch + orig2.iloc[1] = 22 + tm.assert_series_equal(ser2, orig2) + + ser2.loc[5:7] = 21 + orig2.iloc[:2] = 21 + tm.assert_series_equal(ser2, orig2) + class TestIntervalIndexInsideMultiIndex: def test_mi_intervalindex_slicing_with_scalar(self):
- [x] closes #54722 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
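The behavior the new tests pin down, in short (values per `test_setitem_interval_with_slice`): `loc` slices on an `IntervalIndex` are treated as label-based against the intervals, and a slice that matches nothing is a no-op rather than an error.

```python
import pandas as pd

ii = pd.IntervalIndex.from_breaks(range(4, 15))  # (4, 5], (5, 6], ..., (13, 14]
ser = pd.Series(range(10), index=ii)

ser.loc[6:8] = 19  # label-based: sets the intervals covering 6 through 8
ser.loc[1:3] = 20  # no interval overlaps [1, 3]: a no-op instead of raising
```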
https://api.github.com/repos/pandas-dev/pandas/pulls/55258
2023-09-23T22:06:37Z
2023-09-25T18:43:41Z
2023-09-25T18:43:40Z
2023-09-25T19:49:25Z
DEPR: Index.insert dtype-inference
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 919ac8b03f936..c2032b0d34536 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -454,6 +454,7 @@ Other Deprecations - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_string` except ``buf``. (:issue:`54229`) - Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_xml` except ``path_or_buffer``. (:issue:`54229`) - Deprecated allowing passing :class:`BlockManager` objects to :class:`DataFrame` or :class:`SingleBlockManager` objects to :class:`Series` (:issue:`52419`) +- Deprecated behavior of :meth:`Index.insert` with an object-dtype index silently performing type inference on the result, explicitly call ``result.infer_objects(copy=False)`` for the old behavior instead (:issue:`51363`) - Deprecated downcasting behavior in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, :meth:`DataFrame.mask`, :meth:`Series.clip`, :meth:`DataFrame.clip`; in a future version these will not infer object-dtype columns to non-object dtype, or all-round floats to integer dtype. Call ``result.infer_objects(copy=False)`` on the result for object inference, or explicitly cast floats to ints. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`53656`) - Deprecated including the groups in computations when using :meth:`.DataFrameGroupBy.apply` and :meth:`.DataFrameGroupBy.resample`; pass ``include_groups=False`` to exclude the groups (:issue:`7155`) - Deprecated indexing an :class:`Index` with a boolean indexer of length zero (:issue:`55820`) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6c9f93d3482a7..3abe77b97fe58 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6939,14 +6939,24 @@ def insert(self, loc: int, item) -> Index: loc = loc if loc >= 0 else loc - 1 new_values[loc] = item - idx = Index._with_infer(new_values, name=self.name) + out = Index._with_infer(new_values, name=self.name) if ( using_pyarrow_string_dtype() - and is_string_dtype(idx.dtype) + and is_string_dtype(out.dtype) and new_values.dtype == object ): - idx = idx.astype(new_values.dtype) - return idx + out = out.astype(new_values.dtype) + if self.dtype == object and out.dtype != object: + # GH#51363 + warnings.warn( + "The behavior of Index.insert with object-dtype is deprecated, " + "in a future version this will return an object-dtype Index " + "instead of inferring a non-object dtype. To retain the old " + "behavior, do `idx.insert(loc, item).infer_objects(copy=False)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + return out def drop( self, diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e3928621a4e48..c233295b25700 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1893,7 +1893,15 @@ def _setitem_with_indexer(self, indexer, value, name: str = "iloc"): # just replacing the block manager here # so the object is the same index = self.obj._get_axis(i) - labels = index.insert(len(index), key) + with warnings.catch_warnings(): + # TODO: re-issue this with setitem-specific message? + warnings.filterwarnings( + "ignore", + "The behavior of Index.insert with object-dtype " + "is deprecated", + category=FutureWarning, + ) + labels = index.insert(len(index), key) # We are expanding the Series/DataFrame values to match # the length of thenew index `labels`. 
GH#40096 ensure @@ -2186,7 +2194,14 @@ def _setitem_with_indexer_missing(self, indexer, value): # and set inplace if self.ndim == 1: index = self.obj.index - new_index = index.insert(len(index), indexer) + with warnings.catch_warnings(): + # TODO: re-issue this with setitem-specific message? + warnings.filterwarnings( + "ignore", + "The behavior of Index.insert with object-dtype is deprecated", + category=FutureWarning, + ) + new_index = index.insert(len(index), indexer) # we have a coerced indexer, e.g. a float # that matches in an int64 Index, so diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index cc88312d5b58f..6eb4099b4d830 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1376,8 +1376,14 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None: value : np.ndarray or ExtensionArray refs : The reference tracking object of the value to set. """ - # insert to the axis; this could possibly raise a TypeError - new_axis = self.items.insert(loc, item) + with warnings.catch_warnings(): + # TODO: re-issue this with setitem-specific message? + warnings.filterwarnings( + "ignore", + "The behavior of Index.insert with object-dtype is deprecated", + category=FutureWarning, + ) + new_axis = self.items.insert(loc, item) if value.ndim == 2: value = value.T diff --git a/pandas/tests/indexes/test_old_base.py b/pandas/tests/indexes/test_old_base.py index f08de8e65451c..0fff6abcfc6a5 100644 --- a/pandas/tests/indexes/test_old_base.py +++ b/pandas/tests/indexes/test_old_base.py @@ -407,13 +407,20 @@ def test_where(self, listlike_box, simple_index): tm.assert_index_equal(result, expected) def test_insert_base(self, index): - result = index[1:4] + trimmed = index[1:4] if not len(index): pytest.skip("Not applicable for empty index") # test 0th element - assert index[0:4].equals(result.insert(0, index[0])) + warn = None + if index.dtype == object and index.inferred_type == "boolean": + # GH#51363 + warn = FutureWarning + msg = "The behavior of Index.insert with object-dtype is deprecated" + with tm.assert_produces_warning(warn, match=msg): + result = trimmed.insert(0, index[0]) + assert index[0:4].equals(result) def test_insert_out_of_bounds(self, index): # TypeError/IndexError matches what np.insert raises in these cases
- [x] closes #51363 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. The warning doesn't include a "do X to avoid this warning" suggestion; not sure whether this merits a future option or a temporary keyword.
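What the deprecation looks like from the user side, using the boolean-inference case exercised in the updated `test_insert_base` (a hypothetical snippet, not code from the PR):

```python
import pandas as pd

idx = pd.Index([True, False], dtype=object)

# Emits the FutureWarning: today the result silently infers to bool dtype,
# in a future version it will stay object dtype
result = idx.insert(0, True)

# The spelling the warning message recommends for keeping the old behavior
result = idx.insert(0, True).infer_objects(copy=False)
```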
https://api.github.com/repos/pandas-dev/pandas/pulls/55257
2023-09-23T20:45:53Z
2023-12-08T23:39:11Z
2023-12-08T23:39:11Z
2023-12-09T16:42:08Z
ENH/PERF: add ExtensionArray.duplicated
diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py index 2584e1f13853a..192f19c36b47d 100644 --- a/asv_bench/benchmarks/algorithms.py +++ b/asv_bench/benchmarks/algorithms.py @@ -1,6 +1,7 @@ from importlib import import_module import numpy as np +import pyarrow as pa import pandas as pd @@ -72,7 +73,16 @@ class Duplicated: params = [ [True, False], ["first", "last", False], - ["int", "uint", "float", "string", "datetime64[ns]", "datetime64[ns, tz]"], + [ + "int", + "uint", + "float", + "string", + "datetime64[ns]", + "datetime64[ns, tz]", + "timestamp[ms][pyarrow]", + "duration[s][pyarrow]", + ], ] param_names = ["unique", "keep", "dtype"] @@ -87,6 +97,12 @@ def setup(self, unique, keep, dtype): "datetime64[ns, tz]": pd.date_range( "2011-01-01", freq="H", periods=N, tz="Asia/Tokyo" ), + "timestamp[ms][pyarrow]": pd.Index( + np.arange(N), dtype=pd.ArrowDtype(pa.timestamp("ms")) + ), + "duration[s][pyarrow]": pd.Index( + np.arange(N), dtype=pd.ArrowDtype(pa.duration("s")) + ), }[dtype] if not unique: data = data.repeat(5) diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst index 83f830bb11198..e412793a328a3 100644 --- a/doc/source/reference/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -49,6 +49,7 @@ objects. api.extensions.ExtensionArray.copy api.extensions.ExtensionArray.view api.extensions.ExtensionArray.dropna + api.extensions.ExtensionArray.duplicated api.extensions.ExtensionArray.equals api.extensions.ExtensionArray.factorize api.extensions.ExtensionArray.fillna diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 9dc095e6de6ff..7667caa830bb7 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -76,6 +76,7 @@ Other enhancements - :func:`read_csv` now supports ``on_bad_lines`` parameter with ``engine="pyarrow"``. 
(:issue:`54480`) - :meth:`ExtensionArray._explode` interface method added to allow extension type implementations of the ``explode`` method (:issue:`54833`) +- :meth:`ExtensionArray.duplicated` added to allow extension type implementations of the ``duplicated`` method (:issue:`55255`) - DataFrame.apply now allows the usage of numba (via ``engine="numba"``) to JIT compile the passed function, allowing for potential speedups (:issue:`54666`) - Implement masked algorithms for :meth:`Series.value_counts` (:issue:`54984`) - @@ -241,6 +242,7 @@ Performance improvements - Performance improvement in :meth:`DataFrame.groupby` when aggregating pyarrow timestamp and duration dtypes (:issue:`55031`) - Performance improvement in :meth:`DataFrame.sort_index` and :meth:`Series.sort_index` when indexed by a :class:`MultiIndex` (:issue:`54835`) - Performance improvement in :meth:`Index.difference` (:issue:`55108`) +- Performance improvement in :meth:`Series.duplicated` for pyarrow dtypes (:issue:`55255`) - Performance improvement when indexing with more than 4 keys (:issue:`54550`) - Performance improvement when localizing time to UTC (:issue:`55241`) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c952178f4c998..4ff3de2fc7b2b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -55,7 +55,6 @@ ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( - ArrowDtype, BaseMaskedDtype, CategoricalDtype, ExtensionDtype, @@ -979,14 +978,16 @@ def value_counts_arraylike( def duplicated( - values: ArrayLike, keep: Literal["first", "last", False] = "first" + values: ArrayLike, + keep: Literal["first", "last", False] = "first", + mask: npt.NDArray[np.bool_] | None = None, ) -> npt.NDArray[np.bool_]: """ Return boolean ndarray denoting duplicate values. Parameters ---------- - values : nd.array, ExtensionArray or Series + values : np.ndarray or ExtensionArray Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first @@ -994,21 +995,15 @@ def duplicated( - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. 
+ mask : ndarray[bool], optional + array indicating which elements to exclude from checking Returns ------- duplicated : ndarray[bool] """ - if hasattr(values, "dtype"): - if isinstance(values.dtype, ArrowDtype) and values.dtype.kind in "ifub": - values = values._to_masked() # type: ignore[union-attr] - - if isinstance(values.dtype, BaseMaskedDtype): - values = cast("BaseMaskedArray", values) - return htable.duplicated(values._data, keep=keep, mask=values._mask) - values = _ensure_data(values) - return htable.duplicated(values, keep=keep) + return htable.duplicated(values, keep=keep, mask=mask) def mode( diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 4b79d0dbb683e..0579aa3760531 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -42,6 +42,7 @@ from pandas.core.dtypes.missing import isna from pandas.core import ( + algorithms as algos, missing, roperator, ) @@ -1289,6 +1290,30 @@ def to_numpy( result[~mask] = data[~mask]._pa_array.to_numpy() return result + @doc(ExtensionArray.duplicated) + def duplicated( + self, keep: Literal["first", "last", False] = "first" + ) -> npt.NDArray[np.bool_]: + pa_type = self._pa_array.type + if pa.types.is_floating(pa_type) or pa.types.is_integer(pa_type): + values = self.to_numpy(na_value=0) + elif pa.types.is_boolean(pa_type): + values = self.to_numpy(na_value=False) + elif pa.types.is_temporal(pa_type): + if pa_type.bit_width == 32: + pa_type = pa.int32() + else: + pa_type = pa.int64() + arr = self.astype(ArrowDtype(pa_type)) + values = arr.to_numpy(na_value=0) + else: + # factorize the values to avoid the performance penalty of + # converting to object dtype + values = self.factorize()[0] + + mask = self.isna() if self._hasna else None + return algos.duplicated(values, keep=keep, mask=mask) + def unique(self) -> Self: """ Compute the ArrowExtensionArray of unique values. diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 933944dbd4632..c06bf7366447b 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -61,6 +61,7 @@ roperator, ) from pandas.core.algorithms import ( + duplicated, factorize_array, isin, map_array, @@ -125,6 +126,7 @@ class ExtensionArray: astype copy dropna + duplicated factorize fillna equals @@ -1116,6 +1118,31 @@ def dropna(self) -> Self: # error: Unsupported operand type for ~ ("ExtensionArray") return self[~self.isna()] # type: ignore[operator] + def duplicated( + self, keep: Literal["first", "last", False] = "first" + ) -> npt.NDArray[np.bool_]: + """ + Return boolean ndarray denoting duplicate values. + + Parameters + ---------- + keep : {'first', 'last', False}, default 'first' + - ``first`` : Mark duplicates as ``True`` except for the first occurrence. + - ``last`` : Mark duplicates as ``True`` except for the last occurrence. + - False : Mark all duplicates as ``True``. + + Returns + ------- + ndarray[bool] + + Examples + -------- + >>> pd.array([1, 1, 2, 3, 3], dtype="Int64").duplicated() + array([False, True, False, False, True]) + """ + mask = self.isna().astype(np.bool_, copy=False) + return duplicated(values=self, keep=keep, mask=mask) + def shift(self, periods: int = 1, fill_value: object = None) -> ExtensionArray: """ Shift values by desired number. 
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 9b85fb0477e6f..56d3711c7d13b 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -952,6 +952,14 @@ def copy(self) -> Self: mask = self._mask.copy() return self._simple_new(data, mask) + @doc(ExtensionArray.duplicated) + def duplicated( + self, keep: Literal["first", "last", False] = "first" + ) -> npt.NDArray[np.bool_]: + values = self._data + mask = self._mask + return algos.duplicated(values, keep=keep, mask=mask) + def unique(self) -> Self: """ Compute the BaseMaskedArray of unique values. diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 4d5eef960293f..cf349220e4ba7 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -28,6 +28,7 @@ from pandas._libs.tslibs import NaT from pandas.compat.numpy import function as nv from pandas.errors import PerformanceWarning +from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_bool_kwarg, @@ -830,6 +831,14 @@ def _first_fill_value_loc(self): diff = np.r_[np.diff(indices), 2] return indices[(diff > 1).argmax()] + 1 + @doc(ExtensionArray.duplicated) + def duplicated( + self, keep: Literal["first", "last", False] = "first" + ) -> npt.NDArray[np.bool_]: + values = np.asarray(self) + mask = np.asarray(self.isna()) + return algos.duplicated(values, keep=keep, mask=mask) + def unique(self) -> Self: uniques = algos.unique(self.sp_values) if len(self.sp_values) != len(self): diff --git a/pandas/core/base.py b/pandas/core/base.py index 3026189e747bb..d4421560bcea7 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1365,7 +1365,10 @@ def drop_duplicates(self, *, keep: DropKeep = "first"): @final def _duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: - return algorithms.duplicated(self._values, keep=keep) + arr = self._values + if isinstance(arr, ExtensionArray): + return arr.duplicated(keep=keep) + return algorithms.duplicated(arr, keep=keep) def _arith_method(self, other, op): res_name = ops.get_op_result_name(self, other) diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index 4e0bc8d804bab..e10c6ef9a7018 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -248,6 +248,18 @@ def test_sort_values_frame(self, data_for_sorting, ascending): ) tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize("keep", ["first", "last", False]) + def test_duplicated(self, data, keep): + arr = data.take([0, 1, 0, 1]) + result = arr.duplicated(keep=keep) + if keep == "first": + expected = np.array([False, False, True, True]) + elif keep == "last": + expected = np.array([True, True, False, False]) + else: + expected = np.array([True, True, True, True]) + tm.assert_numpy_array_equal(result, expected) + @pytest.mark.parametrize("box", [pd.Series, lambda x: x]) @pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique]) def test_unique(self, data, box, method):
- [x] closes #27264 - [x] closes #48424 - [x] closes #48788 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v2.2.0.rst` file if fixing a bug or adding a new feature. [updated] `asv continuous -f 1.1 upstream/main arrow-temporal-duplicated -b algorithms.Duplicated` ``` before after ratio - 101±2ms 51.6±3ms 0.51 algorithms.Duplicated.time_duplicated(False, False, 'string[pyarrow]') - 98.5±3ms 47.3±4ms 0.48 algorithms.Duplicated.time_duplicated(False, 'first', 'string[pyarrow]') - 96.0±4ms 45.5±4ms 0.47 algorithms.Duplicated.time_duplicated(False, 'last', 'string[pyarrow]') - 1.57±0.03s 13.6±0.3ms 0.01 algorithms.Duplicated.time_duplicated(False, False, 'timestamp[ms][pyarrow]') - 1.54±0.02s 13.1±0.7ms 0.01 algorithms.Duplicated.time_duplicated(False, 'first', 'timestamp[ms][pyarrow]') - 1.55±0s 12.8±0.7ms 0.01 algorithms.Duplicated.time_duplicated(False, 'last', 'timestamp[ms][pyarrow]') ```
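Usage is uniform across the masked, arrow, and sparse implementations. The first call below is the docstring example from the PR; the `keep` variants are assumed to mirror `Series.duplicated` semantics:

```python
import pandas as pd

arr = pd.array([1, 1, 2, 3, 3], dtype="Int64")

arr.duplicated()
# array([False,  True, False, False,  True])

arr.duplicated(keep="last")
# array([ True, False, False,  True, False])

arr.duplicated(keep=False)
# array([ True,  True, False,  True,  True])
```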
https://api.github.com/repos/pandas-dev/pandas/pulls/55255
2023-09-23T02:31:29Z
2023-10-03T15:48:01Z
2023-10-03T15:48:01Z
2023-11-16T12:56:51Z
TYP: towards matplotlib 3.8
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml index b22f8cb34c814..94652e8586d77 100644 --- a/ci/deps/actions-310.yaml +++ b/ci/deps/actions-310.yaml @@ -33,7 +33,7 @@ dependencies: - gcsfs>=2022.11.0 - jinja2>=3.1.2 - lxml>=4.9.2 - - matplotlib>=3.6.3, <3.8 + - matplotlib>=3.6.3 - numba>=0.56.4 - numexpr>=2.8.4 - odfpy>=1.4.1 diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml index ceea734352fca..bf47bfe3a83ec 100644 --- a/ci/deps/actions-311-downstream_compat.yaml +++ b/ci/deps/actions-311-downstream_compat.yaml @@ -34,7 +34,7 @@ dependencies: - gcsfs>=2022.11.0 - jinja2>=3.1.2 - lxml>=4.9.2 - - matplotlib>=3.6.3, <3.8 + - matplotlib>=3.6.3 - numba>=0.56.4 - numexpr>=2.8.4 - odfpy>=1.4.1 diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml index 1c9f349c2eb28..bd9e2059ef477 100644 --- a/ci/deps/actions-311.yaml +++ b/ci/deps/actions-311.yaml @@ -33,7 +33,7 @@ dependencies: - gcsfs>=2022.11.0 - jinja2>=3.1.2 - lxml>=4.9.2 - - matplotlib>=3.6.3, <3.8 + - matplotlib>=3.6.3 - numba>=0.56.4 - numexpr>=2.8.4 - odfpy>=1.4.1 diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 92b88a34094d2..cf4087a3e4670 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -33,7 +33,7 @@ dependencies: - gcsfs>=2022.11.0 - jinja2>=3.1.2 - lxml>=4.9.2 - - matplotlib>=3.6.3, <3.8 + - matplotlib>=3.6.3 - numba>=0.56.4 - numexpr>=2.8.4 - odfpy>=1.4.1 diff --git a/ci/deps/circle-310-arm64.yaml b/ci/deps/circle-310-arm64.yaml index f81b91fcbae3b..abe6145d077ed 100644 --- a/ci/deps/circle-310-arm64.yaml +++ b/ci/deps/circle-310-arm64.yaml @@ -33,7 +33,7 @@ dependencies: - gcsfs>=2022.11.0 - jinja2>=3.1.2 - lxml>=4.9.2 - - matplotlib>=3.6.3, <3.8 + - matplotlib>=3.6.3 - numba>=0.56.4 - numexpr>=2.8.4 - odfpy>=1.4.1 diff --git a/environment.yml b/environment.yml index e41389c7f262a..aea71efd72f2c 100644 --- a/environment.yml +++ b/environment.yml @@ -35,7 +35,7 @@ dependencies: - ipython - jinja2>=3.1.2 - lxml>=4.9.2 - - matplotlib>=3.6.3, <3.8 + - matplotlib>=3.6.3 - numba>=0.56.4 - numexpr>=2.8.4 - openpyxl>=3.1.0 diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 5a7ceabbf554e..848fb77c942fb 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -64,6 +64,8 @@ if TYPE_CHECKING: from collections.abc import Generator + from matplotlib.axis import Axis + from pandas._libs.tslibs.offsets import BaseOffset @@ -187,7 +189,7 @@ class TimeFormatter(Formatter): def __init__(self, locs) -> None: self.locs = locs - def __call__(self, x, pos: int = 0) -> str: + def __call__(self, x, pos: int | None = 0) -> str: """ Return the time of day as a formatted string. 
@@ -364,8 +366,14 @@ def get_locator(self, dmin, dmax): locator = MilliSecondLocator(self.tz) locator.set_axis(self.axis) - locator.axis.set_view_interval(*self.axis.get_view_interval()) - locator.axis.set_data_interval(*self.axis.get_data_interval()) + # error: Item "None" of "Axis | _DummyAxis | _AxisWrapper | None" + # has no attribute "get_data_interval" + locator.axis.set_view_interval( # type: ignore[union-attr] + *self.axis.get_view_interval() # type: ignore[union-attr] + ) + locator.axis.set_data_interval( # type: ignore[union-attr] + *self.axis.get_data_interval() # type: ignore[union-attr] + ) return locator return mdates.AutoDateLocator.get_locator(self, dmin, dmax) @@ -950,6 +958,8 @@ class TimeSeries_DateLocator(Locator): day : {int}, optional """ + axis: Axis + def __init__( self, freq: BaseOffset, @@ -999,7 +1009,9 @@ def __call__(self): base = self.base (d, m) = divmod(vmin, base) vmin = (d + 1) * base - locs = list(range(vmin, vmax + 1, base)) + # error: No overload variant of "range" matches argument types "float", + # "float", "int" + locs = list(range(vmin, vmax + 1, base)) # type: ignore[call-overload] return locs def autoscale(self): @@ -1038,6 +1050,8 @@ class TimeSeries_DateFormatter(Formatter): Whether the formatter works in dynamic mode or not. """ + axis: Axis + def __init__( self, freq: BaseOffset, @@ -1084,7 +1098,7 @@ def set_locs(self, locs) -> None: (vmin, vmax) = (vmax, vmin) self._set_default_format(vmin, vmax) - def __call__(self, x, pos: int = 0) -> str: + def __call__(self, x, pos: int | None = 0) -> str: if self.formatdict is None: return "" else: @@ -1107,6 +1121,8 @@ class TimeSeries_TimedeltaFormatter(Formatter): Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`. """ + axis: Axis + @staticmethod def format_timedelta_ticks(x, pos, n_decimals: int) -> str: """ @@ -1124,7 +1140,7 @@ def format_timedelta_ticks(x, pos, n_decimals: int) -> str: s = f"{int(d):d} days {s}" return s - def __call__(self, x, pos: int = 0) -> str: + def __call__(self, x, pos: int | None = 0) -> str: (vmin, vmax) = tuple(self.axis.get_view_interval()) n_decimals = min(int(np.ceil(np.log10(100 * 10**9 / abs(vmax - vmin)))), 9) return self.format_timedelta_ticks(x, pos, n_decimals) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 76b68c6b03dd2..6be8284d2a0be 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -529,9 +529,16 @@ def _maybe_right_yaxis(self, ax: Axes, axes_num: int) -> Axes: # otherwise, create twin axes orig_ax, new_ax = ax, ax.twinx() # TODO: use Matplotlib public API when available - new_ax._get_lines = orig_ax._get_lines - new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill - orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax + new_ax._get_lines = orig_ax._get_lines # type: ignore[attr-defined] + # TODO #54485 + new_ax._get_patches_for_fill = ( # type: ignore[attr-defined] + orig_ax._get_patches_for_fill # type: ignore[attr-defined] + ) + # TODO #54485 + orig_ax.right_ax, new_ax.left_ax = ( # type: ignore[attr-defined] + new_ax, + orig_ax, + ) if not self._has_plotted_object(orig_ax): # no data on left y orig_ax.get_yaxis().set_visible(False) @@ -540,7 +547,7 @@ def _maybe_right_yaxis(self, ax: Axes, axes_num: int) -> Axes: new_ax.set_yscale("log") elif self.logy == "sym" or self.loglog == "sym": new_ax.set_yscale("symlog") - return new_ax + return new_ax # type: ignore[return-value] @final @cache_readonly @@ -1206,12 +1213,15 @@ def 
_get_errorbars( @final def _get_subplots(self, fig: Figure): - from matplotlib.axes import Subplot + if Version(mpl.__version__) < Version("3.8"): + from matplotlib.axes import Subplot as Klass + else: + from matplotlib.axes import Axes as Klass return [ ax for ax in fig.get_axes() - if (isinstance(ax, Subplot) and ax.get_subplotspec() is not None) + if (isinstance(ax, Klass) and ax.get_subplotspec() is not None) ] @final @@ -1255,8 +1265,10 @@ def _post_plot_logic(self, ax: Axes, data) -> None: x, y = self.x, self.y xlabel = self.xlabel if self.xlabel is not None else pprint_thing(x) ylabel = self.ylabel if self.ylabel is not None else pprint_thing(y) - ax.set_xlabel(xlabel) - ax.set_ylabel(ylabel) + # error: Argument 1 to "set_xlabel" of "_AxesBase" has incompatible + # type "Hashable"; expected "str" + ax.set_xlabel(xlabel) # type: ignore[arg-type] + ax.set_ylabel(ylabel) # type: ignore[arg-type] @final def _plot_colorbar(self, ax: Axes, *, fig: Figure, **kwds): @@ -1393,7 +1405,7 @@ def _get_norm_and_cmap(self, c_values, color_by_categorical: bool): else: cmap = None - if color_by_categorical: + if color_by_categorical and cmap is not None: from matplotlib import colors n_cats = len(self.data[c].cat.categories) @@ -1584,13 +1596,13 @@ def _ts_plot(self, ax: Axes, x, data: Series, style=None, **kwds): decorate_axes(ax.left_ax, freq, kwds) if hasattr(ax, "right_ax"): decorate_axes(ax.right_ax, freq, kwds) - ax._plot_data.append((data, self._kind, kwds)) + # TODO #54485 + ax._plot_data.append((data, self._kind, kwds)) # type: ignore[attr-defined] lines = self._plot(ax, data.index, np.asarray(data.values), style=style, **kwds) # set date formatter, locators and rescale limits - # error: Argument 3 to "format_dateaxis" has incompatible type "Index"; - # expected "DatetimeIndex | PeriodIndex" - format_dateaxis(ax, ax.freq, data.index) # type: ignore[arg-type] + # TODO #54485 + format_dateaxis(ax, ax.freq, data.index) # type: ignore[arg-type, attr-defined] return lines @final @@ -1606,11 +1618,15 @@ def _initialize_stacker(cls, ax: Axes, stacking_id, n: int) -> None: if stacking_id is None: return if not hasattr(ax, "_stacker_pos_prior"): - ax._stacker_pos_prior = {} + # TODO #54485 + ax._stacker_pos_prior = {} # type: ignore[attr-defined] if not hasattr(ax, "_stacker_neg_prior"): - ax._stacker_neg_prior = {} - ax._stacker_pos_prior[stacking_id] = np.zeros(n) - ax._stacker_neg_prior[stacking_id] = np.zeros(n) + # TODO #54485 + ax._stacker_neg_prior = {} # type: ignore[attr-defined] + # TODO #54485 + ax._stacker_pos_prior[stacking_id] = np.zeros(n) # type: ignore[attr-defined] + # TODO #54485 + ax._stacker_neg_prior[stacking_id] = np.zeros(n) # type: ignore[attr-defined] @final @classmethod @@ -1624,9 +1640,17 @@ def _get_stacked_values( cls._initialize_stacker(ax, stacking_id, len(values)) if (values >= 0).all(): - return ax._stacker_pos_prior[stacking_id] + values + # TODO #54485 + return ( + ax._stacker_pos_prior[stacking_id] # type: ignore[attr-defined] + + values + ) elif (values <= 0).all(): - return ax._stacker_neg_prior[stacking_id] + values + # TODO #54485 + return ( + ax._stacker_neg_prior[stacking_id] # type: ignore[attr-defined] + + values + ) raise ValueError( "When stacked is True, each column must be either " @@ -1640,9 +1664,11 @@ def _update_stacker(cls, ax: Axes, stacking_id: int | None, values) -> None: if stacking_id is None: return if (values >= 0).all(): - ax._stacker_pos_prior[stacking_id] += values + # TODO #54485 + ax._stacker_pos_prior[stacking_id] += values # type: 
ignore[attr-defined] elif (values <= 0).all(): - ax._stacker_neg_prior[stacking_id] += values + # TODO #54485 + ax._stacker_neg_prior[stacking_id] += values # type: ignore[attr-defined] def _post_plot_logic(self, ax: Axes, data) -> None: from matplotlib.ticker import FixedLocator @@ -1658,7 +1684,9 @@ def get_label(i): if self._need_to_set_index: xticks = ax.get_xticks() xticklabels = [get_label(x) for x in xticks] - ax.xaxis.set_major_locator(FixedLocator(xticks)) + # error: Argument 1 to "FixedLocator" has incompatible type "ndarray[Any, + # Any]"; expected "Sequence[float]" + ax.xaxis.set_major_locator(FixedLocator(xticks)) # type: ignore[arg-type] ax.set_xticklabels(xticklabels) # If the index is an irregular time series, then by default @@ -1737,9 +1765,11 @@ def _plot( # type: ignore[override] if stacking_id is None: start = np.zeros(len(y)) elif (y >= 0).all(): - start = ax._stacker_pos_prior[stacking_id] + # TODO #54485 + start = ax._stacker_pos_prior[stacking_id] # type: ignore[attr-defined] elif (y <= 0).all(): - start = ax._stacker_neg_prior[stacking_id] + # TODO #54485 + start = ax._stacker_neg_prior[stacking_id] # type: ignore[attr-defined] else: start = np.zeros(len(y)) @@ -2005,7 +2035,9 @@ def _decorate_ticks( ax.set_yticklabels(ticklabels) if name is not None and self.use_index: ax.set_ylabel(name) - ax.set_xlabel(self.xlabel) + # error: Argument 1 to "set_xlabel" of "_AxesBase" has incompatible type + # "Hashable | None"; expected "str" + ax.set_xlabel(self.xlabel) # type: ignore[arg-type] class PiePlot(MPLPlot): diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index f5b415f12f37d..de4fd91541a9d 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -199,11 +199,21 @@ def _get_column_weights(weights, i: int, y): def _post_plot_logic(self, ax: Axes, data) -> None: if self.orientation == "horizontal": - ax.set_xlabel("Frequency" if self.xlabel is None else self.xlabel) - ax.set_ylabel(self.ylabel) + # error: Argument 1 to "set_xlabel" of "_AxesBase" has incompatible + # type "Hashable"; expected "str" + ax.set_xlabel( + "Frequency" + if self.xlabel is None + else self.xlabel # type: ignore[arg-type] + ) + ax.set_ylabel(self.ylabel) # type: ignore[arg-type] else: - ax.set_xlabel(self.xlabel) - ax.set_ylabel("Frequency" if self.ylabel is None else self.ylabel) + ax.set_xlabel(self.xlabel) # type: ignore[arg-type] + ax.set_ylabel( + "Frequency" + if self.ylabel is None + else self.ylabel # type: ignore[arg-type] + ) @property def orientation(self) -> PlottingOrientation: @@ -447,8 +457,14 @@ def hist_series( ax.grid(grid) axes = np.array([ax]) + # error: Argument 1 to "set_ticks_props" has incompatible type "ndarray[Any, + # dtype[Any]]"; expected "Axes | Sequence[Axes]" set_ticks_props( - axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot + axes, # type: ignore[arg-type] + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, ) else: diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py index a5f34e9434cb7..bf4e4be3bfd82 100644 --- a/pandas/plotting/_matplotlib/style.py +++ b/pandas/plotting/_matplotlib/style.py @@ -269,7 +269,9 @@ def _is_single_string_color(color: Color) -> bool: """ conv = matplotlib.colors.ColorConverter() try: - conv.to_rgba(color) + # error: Argument 1 to "to_rgba" of "ColorConverter" has incompatible type + # "str | Sequence[float]"; expected "tuple[float, float, float] | ..." 
+ conv.to_rgba(color) # type: ignore[arg-type] except ValueError: return False else: diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index 5471305b03baf..f0b68e5dde450 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -126,7 +126,7 @@ def _upsample_others(ax: Axes, freq: BaseOffset, kwargs: dict[str, Any]) -> None labels.extend(rlabels) if legend is not None and kwargs.get("legend", True) and len(lines) > 0: - title = legend.get_title().get_text() + title: str | None = legend.get_title().get_text() if title == "None": title = None ax.legend(lines, labels, loc="best", title=title) @@ -136,7 +136,8 @@ def _replot_ax(ax: Axes, freq: BaseOffset, kwargs: dict[str, Any]): data = getattr(ax, "_plot_data", None) # clear current axes and data - ax._plot_data = [] + # TODO #54485 + ax._plot_data = [] # type: ignore[attr-defined] ax.clear() decorate_axes(ax, freq, kwargs) @@ -148,7 +149,8 @@ def _replot_ax(ax: Axes, freq: BaseOffset, kwargs: dict[str, Any]): series = series.copy() idx = series.index.asfreq(freq, how="S") series.index = idx - ax._plot_data.append((series, plotf, kwds)) + # TODO #54485 + ax._plot_data.append((series, plotf, kwds)) # type: ignore[attr-defined] # for tsplot if isinstance(plotf, str): @@ -165,17 +167,23 @@ def _replot_ax(ax: Axes, freq: BaseOffset, kwargs: dict[str, Any]): def decorate_axes(ax: Axes, freq: BaseOffset, kwargs: dict[str, Any]) -> None: """Initialize axes for time-series plotting""" if not hasattr(ax, "_plot_data"): - ax._plot_data = [] + # TODO #54485 + ax._plot_data = [] # type: ignore[attr-defined] - ax.freq = freq + # TODO #54485 + ax.freq = freq # type: ignore[attr-defined] xaxis = ax.get_xaxis() - xaxis.freq = freq + # TODO #54485 + xaxis.freq = freq # type: ignore[attr-defined] if not hasattr(ax, "legendlabels"): - ax.legendlabels = [kwargs.get("label", None)] + # TODO #54485 + ax.legendlabels = [kwargs.get("label", None)] # type: ignore[attr-defined] else: ax.legendlabels.append(kwargs.get("label", None)) - ax.view_interval = None - ax.date_axis_info = None + # TODO #54485 + ax.view_interval = None # type: ignore[attr-defined] + # TODO #54485 + ax.date_axis_info = None # type: ignore[attr-defined] def _get_ax_freq(ax: Axes): diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 8c0e401f991a6..898b5b25e7b01 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -52,10 +52,12 @@ def maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None: def format_date_labels(ax: Axes, rot) -> None: # mini version of autofmt_xdate for label in ax.get_xticklabels(): - label.set_ha("right") + label.set_horizontalalignment("right") label.set_rotation(rot) fig = ax.get_figure() - maybe_adjust_figure(fig, bottom=0.2) + if fig is not None: + # should always be a Figure but can technically be None + maybe_adjust_figure(fig, bottom=0.2) def table( @@ -76,8 +78,14 @@ def table( cellText = data.values + # error: Argument "cellText" to "table" has incompatible type "ndarray[Any, + # Any]"; expected "Sequence[Sequence[str]] | None" return matplotlib.table.table( - ax, cellText=cellText, rowLabels=rowLabels, colLabels=colLabels, **kwargs + ax, + cellText=cellText, # type: ignore[arg-type] + rowLabels=rowLabels, + colLabels=colLabels, + **kwargs, ) @@ -369,12 +377,12 @@ def _has_externally_shared_axis(ax1: Axes, compare_axis: str) -> bool: "_has_externally_shared_axis() needs 'x' or 'y' as a 
second parameter" ) - axes = axes.get_siblings(ax1) + axes_siblings = axes.get_siblings(ax1) # Retain ax1 and any of its siblings which aren't in the same position as it ax1_points = ax1.get_position().get_points() - for ax2 in axes: + for ax2 in axes_siblings: if not np.array_equal(ax1_points, ax2.get_position().get_points()): return True diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index b274866b7c9a8..9ac20774b8c93 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -3179,7 +3179,9 @@ def dtype_backend_data() -> DataFrame: @pytest.fixture def dtype_backend_expected(): - def func(storage, dtype_backend, conn_name): + def func(storage, dtype_backend, conn_name) -> DataFrame: + string_array: StringArray | ArrowStringArray + string_array_na: StringArray | ArrowStringArray if storage == "python": string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_)) string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_)) diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index fd5a66049bd24..69120160699c2 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -328,7 +328,7 @@ def _check_axes_shape(axes, axes_num=None, layout=None, figsize=None): ) -def _flatten_visible(axes): +def _flatten_visible(axes: Axes | Sequence[Axes]) -> Sequence[Axes]: """ Flatten axes, and filter only visible @@ -339,8 +339,8 @@ def _flatten_visible(axes): """ from pandas.plotting._matplotlib.tools import flatten_axes - axes = flatten_axes(axes) - axes = [ax for ax in axes if ax.get_visible()] + axes_ndarray = flatten_axes(axes) + axes = [ax for ax in axes_ndarray if ax.get_visible()] return axes diff --git a/pyright_reportGeneralTypeIssues.json b/pyright_reportGeneralTypeIssues.json index c059b9c589ecd..e155b34053069 100644 --- a/pyright_reportGeneralTypeIssues.json +++ b/pyright_reportGeneralTypeIssues.json @@ -99,6 +99,9 @@ "pandas/io/sql.py", "pandas/io/stata.py", "pandas/plotting/_matplotlib/boxplot.py", + "pandas/plotting/_matplotlib/core.py", + "pandas/plotting/_matplotlib/timeseries.py", + "pandas/plotting/_matplotlib/tools.py", "pandas/tseries/frequencies.py", "pandas/tseries/holiday.py", ], diff --git a/requirements-dev.txt b/requirements-dev.txt index 490e170299030..faf915f4b9716 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -24,7 +24,7 @@ gcsfs>=2022.11.0 ipython jinja2>=3.1.2 lxml>=4.9.2 -matplotlib>=3.6.3, <3.8 +matplotlib>=3.6.3 numba>=0.56.4 numexpr>=2.8.4 openpyxl>=3.1.0
Type changes for matplotlib 3.8. There is more work needed:
- fix the failing CI with 3.8: https://github.com/pandas-dev/pandas/pull/55210#issuecomment-1732096783
- refactor the use of matplotlib so that we do not set non-existent attributes on matplotlib objects and do not use private methods (see the mypy errors below). This can be done in a separate PR (or we add type ignores for now).

Typing errors remaining with 3.8:

```
pandas/plotting/_matplotlib/timeseries.py:127: error: "Axes" has no attribute "_plot_data"; maybe "plot_date"? [attr-defined]
pandas/plotting/_matplotlib/timeseries.py:139: error: "Axes" has no attribute "_plot_data"; maybe "plot_date"? [attr-defined]
pandas/plotting/_matplotlib/timeseries.py:156: error: "Axes" has no attribute "_plot_data"; maybe "plot_date"? [attr-defined]
pandas/plotting/_matplotlib/timeseries.py:158: error: "Axes" has no attribute "freq" [attr-defined]
pandas/plotting/_matplotlib/timeseries.py:160: error: "XAxis" has no attribute "freq" [attr-defined]
pandas/plotting/_matplotlib/timeseries.py:162: error: "Axes" has no attribute "legendlabels" [attr-defined]
pandas/plotting/_matplotlib/timeseries.py:165: error: "Axes" has no attribute "view_interval" [attr-defined]
pandas/plotting/_matplotlib/timeseries.py:166: error: "Axes" has no attribute "date_axis_info" [attr-defined]
pandas/plotting/_matplotlib/core.py:468: error: "Axes" has no attribute "containers" [attr-defined]
pandas/plotting/_matplotlib/core.py:485: error: "_AxesBase" has no attribute "_get_lines"; maybe "get_lines"? [attr-defined]
pandas/plotting/_matplotlib/core.py:485: error: "Axes" has no attribute "_get_lines" [attr-defined]
pandas/plotting/_matplotlib/core.py:486: error: "_AxesBase" has no attribute "_get_patches_for_fill" [attr-defined]
pandas/plotting/_matplotlib/core.py:486: error: "Axes" has no attribute "_get_patches_for_fill" [attr-defined]
pandas/plotting/_matplotlib/core.py:487: error: "Axes" has no attribute "right_ax" [attr-defined]
pandas/plotting/_matplotlib/core.py:487: error: "_AxesBase" has no attribute "left_ax" [attr-defined]
pandas/plotting/_matplotlib/core.py:1440: error: "Axes" has no attribute "_plot_data"; maybe "plot_date"? [attr-defined]
pandas/plotting/_matplotlib/core.py:1444: error: "Axes" has no attribute "freq" [attr-defined]
pandas/plotting/_matplotlib/core.py:1458: error: "Axes" has no attribute "_stacker_pos_prior" [attr-defined]
pandas/plotting/_matplotlib/core.py:1460: error: "Axes" has no attribute "_stacker_neg_prior" [attr-defined]
pandas/plotting/_matplotlib/core.py:1461: error: "Axes" has no attribute "_stacker_pos_prior" [attr-defined]
pandas/plotting/_matplotlib/core.py:1462: error: "Axes" has no attribute "_stacker_neg_prior" [attr-defined]
pandas/plotting/_matplotlib/core.py:1473: error: "Axes" has no attribute "_stacker_pos_prior" [attr-defined]
pandas/plotting/_matplotlib/core.py:1475: error: "Axes" has no attribute "_stacker_neg_prior" [attr-defined]
pandas/plotting/_matplotlib/core.py:1488: error: "Axes" has no attribute "_stacker_pos_prior" [attr-defined]
pandas/plotting/_matplotlib/core.py:1490: error: "Axes" has no attribute "_stacker_neg_prior" [attr-defined]
pandas/plotting/_matplotlib/core.py:1590: error: "Axes" has no attribute "_stacker_pos_prior" [attr-defined]
pandas/plotting/_matplotlib/core.py:1592: error: "Axes" has no attribute "_stacker_neg_prior" [attr-defined]
```

This PR will obviously fail CI, as it doesn't update matplotlib itself.
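One way the suggested refactor could look — a minimal sketch, not what this PR does (the PR keeps the attributes and silences mypy with `# type: ignore[attr-defined]` plus TODO #54485 markers) — is to move the per-Axes state into an external `WeakKeyDictionary`; `get_plot_data` below is a hypothetical helper, not pandas or matplotlib API:

```python
# Sketch: keep per-Axes plotting state in an external mapping instead of
# setting private attributes such as ax._plot_data on matplotlib objects.
from weakref import WeakKeyDictionary

import matplotlib.pyplot as plt
from matplotlib.axes import Axes

# Weak keys let the bookkeeping be garbage-collected together with the Axes.
_plot_data: "WeakKeyDictionary[Axes, list]" = WeakKeyDictionary()


def get_plot_data(ax: Axes) -> list:
    """Return the plot-data list for this Axes, creating it on first use."""
    return _plot_data.setdefault(ax, [])


fig, ax = plt.subplots()
get_plot_data(ax).append(("data", "line", {}))
assert len(get_plot_data(ax)) == 1  # state tracked without touching `ax`
```

A plain dict keyed by `id(ax)` would keep dead Axes alive; weak keys avoid that, which is why the sketch reaches for `weakref` rather than an ordinary module-level dict.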
https://api.github.com/repos/pandas-dev/pandas/pulls/55253
2023-09-22T23:53:31Z
2023-11-15T21:38:02Z
2023-11-15T21:38:02Z
2023-12-10T04:33:46Z
DEPR: 'A' for yearly frequency and YearEnd in favour of 'Y'
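For context, a minimal before/after of the deprecation this PR introduces — a sketch of the intended behaviour matching the `DEPR_ABBREVS` entries and the `test_frequencies_A_T_S_L_U_N_deprecated` test in the diff below, assuming a pandas build that includes this change:

```python
import warnings

import pandas as pd

# 'Y' is the supported yearly alias going forward.
idx = pd.date_range("2000", periods=3, freq="Y")

# 'A' (and anchored variants such as 'A-MAY') now emit a FutureWarning
# pointing users at 'Y', per the DEPR_ABBREVS mapping in this PR.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    depr = pd.date_range("2000", periods=3, freq="A")

assert depr.equals(idx)  # same result, different alias
assert any(issubclass(w.category, FutureWarning) for w in caught)
```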
diff --git a/asv_bench/benchmarks/tslibs/period.py b/asv_bench/benchmarks/tslibs/period.py index a92fbbe8d4dbe..67f3b7736018d 100644 --- a/asv_bench/benchmarks/tslibs/period.py +++ b/asv_bench/benchmarks/tslibs/period.py @@ -72,7 +72,7 @@ def time_now(self, freq): self.per.now(freq) def time_asfreq(self, freq): - self.per.asfreq("A") + self.per.asfreq("Y") def time_str(self, freq): str(self.per) diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 6bd181740c78d..c2fe277c4f4e5 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -2332,7 +2332,7 @@ A few notes on the generated table schema: .. ipython:: python - s_per = pd.Series(1, index=pd.period_range("2016", freq="A-DEC", periods=4)) + s_per = pd.Series(1, index=pd.period_range("2016", freq="Y-DEC", periods=4)) build_table_schema(s_per) * Categoricals use the ``any`` type and an ``enum`` constraint listing diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 113efeefb48ce..3ec303323b887 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -895,7 +895,7 @@ into ``freq`` keyword arguments. The available date offsets and associated frequ :class:`~pandas.tseries.offsets.BQuarterEnd`, ``'BQ``, "business quarter end" :class:`~pandas.tseries.offsets.BQuarterBegin`, ``'BQS'``, "business quarter begin" :class:`~pandas.tseries.offsets.FY5253Quarter`, ``'REQ'``, "retail (aka 52-53 week) quarter" - :class:`~pandas.tseries.offsets.YearEnd`, ``'A'``, "calendar year end" + :class:`~pandas.tseries.offsets.YearEnd`, ``'Y'``, "calendar year end" :class:`~pandas.tseries.offsets.YearBegin`, ``'AS'`` or ``'BYS'``,"calendar year begin" :class:`~pandas.tseries.offsets.BYearEnd`, ``'BA'``, "business year end" :class:`~pandas.tseries.offsets.BYearBegin`, ``'BAS'``, "business year begin" @@ -1258,7 +1258,7 @@ frequencies. We will refer to these aliases as *offset aliases*. "BQ", "business quarter end frequency" "QS", "quarter start frequency" "BQS", "business quarter start frequency" - "A, Y", "year end frequency" + "Y", "year end frequency" "BA, BY", "business year end frequency" "AS, YS", "year start frequency" "BAS, BYS", "business year start frequency" @@ -1321,7 +1321,7 @@ frequencies. We will refer to these aliases as *period aliases*. "W", "weekly frequency" "M", "monthly frequency" "Q", "quarterly frequency" - "A, Y", "yearly frequency" + "Y", "yearly frequency" "H", "hourly frequency" "min", "minutely frequency" "s", "secondly frequency" @@ -1331,8 +1331,8 @@ frequencies. We will refer to these aliases as *period aliases*. .. deprecated:: 2.2.0 - Aliases ``T``, ``S``, ``L``, ``U``, and ``N`` are deprecated in favour of the aliases - ``min``, ``s``, ``ms``, ``us``, and ``ns``. + Aliases ``A``, ``T``, ``S``, ``L``, ``U``, and ``N`` are deprecated in favour of the aliases + ``Y``, ``min``, ``s``, ``ms``, ``us``, and ``ns``. Combining aliases @@ -1383,18 +1383,18 @@ For some frequencies you can specify an anchoring suffix: "(B)Q(S)\-SEP", "quarterly frequency, year ends in September" "(B)Q(S)\-OCT", "quarterly frequency, year ends in October" "(B)Q(S)\-NOV", "quarterly frequency, year ends in November" - "(B)A(S)\-DEC", "annual frequency, anchored end of December. 
Same as 'A'" - "(B)A(S)\-JAN", "annual frequency, anchored end of January" - "(B)A(S)\-FEB", "annual frequency, anchored end of February" - "(B)A(S)\-MAR", "annual frequency, anchored end of March" - "(B)A(S)\-APR", "annual frequency, anchored end of April" - "(B)A(S)\-MAY", "annual frequency, anchored end of May" - "(B)A(S)\-JUN", "annual frequency, anchored end of June" - "(B)A(S)\-JUL", "annual frequency, anchored end of July" - "(B)A(S)\-AUG", "annual frequency, anchored end of August" - "(B)A(S)\-SEP", "annual frequency, anchored end of September" - "(B)A(S)\-OCT", "annual frequency, anchored end of October" - "(B)A(S)\-NOV", "annual frequency, anchored end of November" + "(B)Y(S)\-DEC", "annual frequency, anchored end of December. Same as 'Y'" + "(B)Y(S)\-JAN", "annual frequency, anchored end of January" + "(B)Y(S)\-FEB", "annual frequency, anchored end of February" + "(B)Y(S)\-MAR", "annual frequency, anchored end of March" + "(B)Y(S)\-APR", "annual frequency, anchored end of April" + "(B)Y(S)\-MAY", "annual frequency, anchored end of May" + "(B)Y(S)\-JUN", "annual frequency, anchored end of June" + "(B)Y(S)\-JUL", "annual frequency, anchored end of July" + "(B)Y(S)\-AUG", "annual frequency, anchored end of August" + "(B)Y(S)\-SEP", "annual frequency, anchored end of September" + "(B)Y(S)\-OCT", "annual frequency, anchored end of October" + "(B)Y(S)\-NOV", "annual frequency, anchored end of November" These can be used as arguments to ``date_range``, ``bdate_range``, constructors for ``DatetimeIndex``, as well as various other timeseries-related functions @@ -1690,7 +1690,7 @@ the end of the interval. .. warning:: The default values for ``label`` and ``closed`` is '**left**' for all - frequency offsets except for 'ME', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' + frequency offsets except for 'ME', 'Y', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. This might unintendedly lead to looking ahead, where the value for a later @@ -1995,7 +1995,7 @@ Because ``freq`` represents a span of ``Period``, it cannot be negative like "-3 .. ipython:: python - pd.Period("2012", freq="A-DEC") + pd.Period("2012", freq="Y-DEC") pd.Period("2012-1-1", freq="D") @@ -2008,7 +2008,7 @@ frequency. Arithmetic is not allowed between ``Period`` with different ``freq`` .. ipython:: python - p = pd.Period("2012", freq="A-DEC") + p = pd.Period("2012", freq="Y-DEC") p + 1 p - 3 p = pd.Period("2012-01", freq="2M") @@ -2050,7 +2050,7 @@ return the number of frequency units between them: .. ipython:: python - pd.Period("2012", freq="A-DEC") - pd.Period("2002", freq="A-DEC") + pd.Period("2012", freq="Y-DEC") - pd.Period("2002", freq="Y-DEC") PeriodIndex and period_range ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -2184,7 +2184,7 @@ method. Let's start with the fiscal year 2011, ending in December: .. ipython:: python - p = pd.Period("2011", freq="A-DEC") + p = pd.Period("2011", freq="Y-DEC") p We can convert it to a monthly frequency. Using the ``how`` parameter, we can @@ -2211,10 +2211,10 @@ input period: p = pd.Period("2011-12", freq="M") - p.asfreq("A-NOV") + p.asfreq("Y-NOV") Note that since we converted to an annual frequency that ends the year in -November, the monthly period of December 2011 is actually in the 2012 A-NOV +November, the monthly period of December 2011 is actually in the 2012 Y-NOV period. .. 
_timeseries.quarterly: diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 445b93705cde5..b0d4ed730ce57 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -222,6 +222,7 @@ Other Deprecations - Deprecated downcasting behavior in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, :meth:`DataFrame.mask`, :meth:`Series.clip`, :meth:`DataFrame.clip`; in a future version these will not infer object-dtype columns to non-object dtype, or all-round floats to integer dtype. Call ``result.infer_objects(copy=False)`` on the result for object inference, or explicitly cast floats to ints. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`53656`) - Deprecated including the groups in computations when using :meth:`DataFrameGroupBy.apply` and :meth:`DataFrameGroupBy.resample`; pass ``include_groups=False`` to exclude the groups (:issue:`7155`) - Deprecated not passing a tuple to :class:`DataFrameGroupBy.get_group` or :class:`SeriesGroupBy.get_group` when grouping by a length-1 list-like (:issue:`25971`) +- Deprecated string ``A`` denoting frequency in :class:`YearEnd` and strings ``A-DEC``, ``A-JAN``, etc. denoting annual frequencies with various fiscal year ends (:issue:`52536`) - Deprecated strings ``S``, ``U``, and ``N`` denoting units in :func:`to_timedelta` (:issue:`52536`) - Deprecated strings ``T``, ``S``, ``L``, ``U``, and ``N`` denoting frequencies in :class:`Minute`, :class:`Second`, :class:`Milli`, :class:`Micro`, :class:`Nano` (:issue:`52536`) - Deprecated strings ``T``, ``S``, ``L``, ``U``, and ``N`` denoting units in :class:`Timedelta` (:issue:`52536`) diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd index e050ac5a6c7b7..bda4fcf04234b 100644 --- a/pandas/_libs/tslibs/dtypes.pxd +++ b/pandas/_libs/tslibs/dtypes.pxd @@ -13,6 +13,8 @@ cpdef bint is_supported_unit(NPY_DATETIMEUNIT reso) cpdef freq_to_period_freqstr(freq_n, freq_name) cdef dict c_OFFSET_TO_PERIOD_FREQSTR +cdef dict c_OFFSET_DEPR_FREQSTR +cdef dict c_REVERSE_OFFSET_DEPR_FREQSTR cdef dict c_DEPR_ABBREVS cdef dict attrname_to_abbrevs cdef dict npy_unit_to_attrname diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi index 72a8fa8ff0b38..d8680ed2d27b4 100644 --- a/pandas/_libs/tslibs/dtypes.pyi +++ b/pandas/_libs/tslibs/dtypes.pyi @@ -7,6 +7,7 @@ from pandas._libs.tslibs.timedeltas import UnitChoices _attrname_to_abbrevs: dict[str, str] _period_code_map: dict[str, int] OFFSET_TO_PERIOD_FREQSTR: dict[str, str] +OFFSET_DEPR_FREQSTR: dict[str, str] DEPR_ABBREVS: dict[str, UnitChoices] def periods_per_day(reso: int) -> int: ... diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx index cca379c620aeb..ebb6e3a240cbe 100644 --- a/pandas/_libs/tslibs/dtypes.pyx +++ b/pandas/_libs/tslibs/dtypes.pyx @@ -101,19 +101,19 @@ cdef class PeriodDtypeBase: _period_code_map = { # Annual freqs with various fiscal year ends. 
- # eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005 - "A-DEC": PeriodDtypeCode.A_DEC, # Annual - December year end - "A-JAN": PeriodDtypeCode.A_JAN, # Annual - January year end - "A-FEB": PeriodDtypeCode.A_FEB, # Annual - February year end - "A-MAR": PeriodDtypeCode.A_MAR, # Annual - March year end - "A-APR": PeriodDtypeCode.A_APR, # Annual - April year end - "A-MAY": PeriodDtypeCode.A_MAY, # Annual - May year end - "A-JUN": PeriodDtypeCode.A_JUN, # Annual - June year end - "A-JUL": PeriodDtypeCode.A_JUL, # Annual - July year end - "A-AUG": PeriodDtypeCode.A_AUG, # Annual - August year end - "A-SEP": PeriodDtypeCode.A_SEP, # Annual - September year end - "A-OCT": PeriodDtypeCode.A_OCT, # Annual - October year end - "A-NOV": PeriodDtypeCode.A_NOV, # Annual - November year end + # eg, 2005 for Y-FEB runs Mar 1, 2004 to Feb 28, 2005 + "Y-DEC": PeriodDtypeCode.A_DEC, # Annual - December year end + "Y-JAN": PeriodDtypeCode.A_JAN, # Annual - January year end + "Y-FEB": PeriodDtypeCode.A_FEB, # Annual - February year end + "Y-MAR": PeriodDtypeCode.A_MAR, # Annual - March year end + "Y-APR": PeriodDtypeCode.A_APR, # Annual - April year end + "Y-MAY": PeriodDtypeCode.A_MAY, # Annual - May year end + "Y-JUN": PeriodDtypeCode.A_JUN, # Annual - June year end + "Y-JUL": PeriodDtypeCode.A_JUL, # Annual - July year end + "Y-AUG": PeriodDtypeCode.A_AUG, # Annual - August year end + "Y-SEP": PeriodDtypeCode.A_SEP, # Annual - September year end + "Y-OCT": PeriodDtypeCode.A_OCT, # Annual - October year end + "Y-NOV": PeriodDtypeCode.A_NOV, # Annual - November year end # Quarterly frequencies with various fiscal year ends. # eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005 @@ -156,22 +156,22 @@ _reverse_period_code_map = { # Yearly aliases; careful not to put these in _reverse_period_code_map _period_code_map.update({"Y" + key[1:]: _period_code_map[key] for key in _period_code_map - if key.startswith("A-")}) + if key.startswith("Y-")}) _period_code_map.update({ "Q": 2000, # Quarterly - December year end (default quarterly) - "A": PeriodDtypeCode.A, # Annual + "Y": PeriodDtypeCode.A, # Annual "W": 4000, # Weekly "C": 5000, # Custom Business Day }) cdef set _month_names = { - x.split("-")[-1] for x in _period_code_map.keys() if x.startswith("A-") + x.split("-")[-1] for x in _period_code_map.keys() if x.startswith("Y-") } # Map attribute-name resolutions to resolution abbreviations _attrname_to_abbrevs = { - "year": "A", + "year": "Y", "quarter": "Q", "month": "M", "day": "D", @@ -192,9 +192,9 @@ OFFSET_TO_PERIOD_FREQSTR: dict = { "BQS": "Q", "QS": "Q", "BQ": "Q", - "BA": "A", - "AS": "A", - "BAS": "A", + "BA": "Y", + "AS": "Y", + "BAS": "Y", "MS": "M", "D": "D", "B": "B", @@ -205,15 +205,19 @@ OFFSET_TO_PERIOD_FREQSTR: dict = { "ns": "ns", "H": "H", "Q": "Q", - "A": "A", + "Y": "Y", "W": "W", "ME": "M", - "Y": "A", - "BY": "A", - "YS": "A", - "BYS": "A", + "BY": "Y", + "YS": "Y", + "BYS": "Y", +} +OFFSET_DEPR_FREQSTR: dict[str, str]= { + "M": "ME", } cdef dict c_OFFSET_TO_PERIOD_FREQSTR = OFFSET_TO_PERIOD_FREQSTR +cdef dict c_OFFSET_DEPR_FREQSTR = OFFSET_DEPR_FREQSTR +cdef dict c_REVERSE_OFFSET_DEPR_FREQSTR = {v: k for k, v in OFFSET_DEPR_FREQSTR.items()} cpdef freq_to_period_freqstr(freq_n, freq_name): if freq_n == 1: @@ -226,6 +230,20 @@ cpdef freq_to_period_freqstr(freq_n, freq_name): # Map deprecated resolution abbreviations to correct resolution abbreviations DEPR_ABBREVS: dict[str, str]= { + "A": "Y", + "a": "Y", + "A-DEC": "Y-DEC", + "A-JAN": "Y-JAN", + "A-FEB": "Y-FEB", + "A-MAR": "Y-MAR", + 
"A-APR": "Y-APR", + "A-MAY": "Y-MAY", + "A-JUN": "Y-JUN", + "A-JUL": "Y-JUL", + "A-AUG": "Y-AUG", + "A-SEP": "Y-SEP", + "A-OCT": "Y-OCT", + "A-NOV": "Y-NOV", "T": "min", "t": "min", "S": "s", diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 74398eb0e2405..a24c2ce7f4b8a 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -15,6 +15,7 @@ from cpython.datetime cimport ( time as dt_time, timedelta, ) + import warnings import_datetime() @@ -48,7 +49,6 @@ from pandas._libs.tslibs.ccalendar import ( ) from pandas.util._exceptions import find_stack_level - from pandas._libs.tslibs.ccalendar cimport ( dayofweek, get_days_in_month, @@ -58,6 +58,8 @@ from pandas._libs.tslibs.ccalendar cimport ( from pandas._libs.tslibs.conversion cimport localize_pydatetime from pandas._libs.tslibs.dtypes cimport ( c_DEPR_ABBREVS, + c_OFFSET_DEPR_FREQSTR, + c_REVERSE_OFFSET_DEPR_FREQSTR, periods_per_day, ) from pandas._libs.tslibs.nattype cimport ( @@ -2496,7 +2498,7 @@ cdef class YearEnd(YearOffset): """ _default_month = 12 - _prefix = "A" + _prefix = "Y" _day_opt = "end" cdef readonly: @@ -4447,7 +4449,7 @@ prefix_mapping = { offset._prefix: offset for offset in [ YearBegin, # 'AS' - YearEnd, # 'A' + YearEnd, # 'Y' BYearBegin, # 'BAS' BYearEnd, # 'BA' BusinessDay, # 'B' @@ -4489,8 +4491,7 @@ _lite_rule_alias = { "W": "W-SUN", "Q": "Q-DEC", - "A": "A-DEC", # YearEnd(month=12), - "Y": "A-DEC", + "Y": "Y-DEC", # YearEnd(month=12), "AS": "AS-JAN", # YearBegin(month=1), "YS": "AS-JAN", "BA": "BA-DEC", # BYearEnd(month=12), @@ -4615,21 +4616,22 @@ cpdef to_offset(freq, bint is_period=False): tups = zip(split[0::4], split[1::4], split[2::4]) for n, (sep, stride, name) in enumerate(tups): - if is_period is False and name == "M": + if is_period is False and name in c_OFFSET_DEPR_FREQSTR: warnings.warn( - "\'M\' will be deprecated, please use \'ME\' " - "for \'month end\'", + f"\'{name}\' will be deprecated, please use " + f"\'{c_OFFSET_DEPR_FREQSTR.get(name)}\' instead.", UserWarning, stacklevel=find_stack_level(), ) - name = "ME" - if is_period is True and name == "ME": + name = c_OFFSET_DEPR_FREQSTR[name] + if is_period is True and name in c_REVERSE_OFFSET_DEPR_FREQSTR: raise ValueError( - r"for Period, please use \'M\' " - "instead of \'ME\'" + f"for Period, please use " + f"\'{c_REVERSE_OFFSET_DEPR_FREQSTR.get(name)}\' " + f"instead of \'{name}\'" ) - elif is_period is True and name == "M": - name = "ME" + elif is_period is True and name in c_OFFSET_DEPR_FREQSTR: + name = c_OFFSET_DEPR_FREQSTR.get(name) if sep != "" and not sep.isspace(): raise ValueError("separator must be spaces") @@ -4648,6 +4650,7 @@ cpdef to_offset(freq, bint is_period=False): stacklevel=find_stack_level(), ) prefix = c_DEPR_ABBREVS[prefix] + if prefix in {"D", "H", "min", "s", "ms", "us", "ns"}: # For these prefixes, we have something like "3H" or # "2.5T", so we can construct a Timedelta with the @@ -4661,7 +4664,7 @@ cpdef to_offset(freq, bint is_period=False): offset *= stride_sign else: stride = int(stride) - offset = _get_offset(name) + offset = _get_offset(prefix) offset = offset * int(np.fabs(stride) * stride_sign) if delta is None: diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 1b4332c2d26cf..8bf1ebb9bf608 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1249,7 +1249,7 @@ cdef class _Timestamp(ABCTimestamp): >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') >>> # Year end 
frequency >>> ts.to_period(freq='Y') - Period('2020', 'A-DEC') + Period('2020', 'Y-DEC') >>> # Month end frequency >>> ts.to_period(freq='M') diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 4b79d0dbb683e..5b5ce4c4d057b 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -2467,7 +2467,7 @@ def _round_temporally( if offset is None: raise ValueError(f"Must specify a valid frequency: {freq}") pa_supported_unit = { - "A": "year", + "Y": "year", "AS": "year", "Q": "quarter", "QS": "quarter", diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index b520f9f4a6deb..67494546010e2 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2042,7 +2042,7 @@ def isocalendar(self) -> DataFrame: >>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y") >>> idx DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'], - dtype='datetime64[ns]', freq='A-DEC') + dtype='datetime64[ns]', freq='Y-DEC') >>> idx.is_leap_year array([ True, False, False]) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 4532e5bffe7a9..2468535397568 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -548,7 +548,7 @@ def __arrow_array__(self, type=None): >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y") >>> idx - PeriodIndex(['2023', '2024', '2025'], dtype='period[A-DEC]') + PeriodIndex(['2023', '2024', '2025'], dtype='period[Y-DEC]') >>> idx.dayofyear Index([365, 366, 365], dtype='int64') """, @@ -712,10 +712,10 @@ def asfreq(self, freq=None, how: str = "E") -> Self: Examples -------- - >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A') + >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='Y') >>> pidx PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'], - dtype='period[A-DEC]') + dtype='period[Y-DEC]') >>> pidx.asfreq('M') PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12', @@ -1025,18 +1025,18 @@ def period_array( Examples -------- - >>> period_array([pd.Period('2017', freq='A'), - ... pd.Period('2018', freq='A')]) + >>> period_array([pd.Period('2017', freq='Y'), + ... pd.Period('2018', freq='Y')]) <PeriodArray> ['2017', '2018'] - Length: 2, dtype: period[A-DEC] + Length: 2, dtype: period[Y-DEC] - >>> period_array([pd.Period('2017', freq='A'), - ... pd.Period('2018', freq='A'), + >>> period_array([pd.Period('2017', freq='Y'), + ... pd.Period('2018', freq='Y'), ... 
pd.NaT]) <PeriodArray> ['2017', '2018', 'NaT'] - Length: 3, dtype: period[A-DEC] + Length: 3, dtype: period[Y-DEC] Integers that look like years are handled diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 9da4eac6a42c8..efee47d1c2686 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -402,7 +402,7 @@ def is_period_dtype(arr_or_dtype) -> bool: False >>> is_period_dtype(pd.Period("2017-01-01")) False - >>> is_period_dtype(pd.PeriodIndex([], freq="A")) + >>> is_period_dtype(pd.PeriodIndex([], freq="Y")) True """ warnings.warn( diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 432c0a745c7a0..8b6226a8a3473 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -12068,7 +12068,7 @@ def to_period( For the yearly frequency >>> idx.to_period("Y") - PeriodIndex(['2001', '2002', '2003'], dtype='period[A-DEC]') + PeriodIndex(['2001', '2002', '2003'], dtype='period[Y-DEC]') """ new_obj = self.copy(deep=copy and not using_copy_on_write()) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 738f4cbe6bc43..d81e21cdbdb1e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9173,11 +9173,11 @@ def resample( Use frame.T.resample(...) instead. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' - for all frequency offsets except for 'ME', 'A', 'Q', 'BM', + for all frequency offsets except for 'ME', 'Y', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' - for all frequency offsets except for 'ME', 'A', 'Q', 'BM', + for all frequency offsets except for 'ME', 'Y', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {{'start', 'end', 's', 'e'}}, default 'start' For `PeriodIndex` only, controls whether to use the start or @@ -9348,12 +9348,12 @@ def resample( assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', - ... freq='A', + ... freq='Y', ... 
periods=2)) >>> s 2012 1 2013 2 - Freq: A-DEC, dtype: int64 + Freq: Y-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN diff --git a/pandas/core/resample.py b/pandas/core/resample.py index e9b2bacd9e1df..9d6256ca75dd8 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -2101,7 +2101,7 @@ def __init__( else: freq = to_offset(freq) - end_types = {"ME", "A", "Q", "BM", "BA", "BQ", "W"} + end_types = {"ME", "Y", "Q", "BM", "BA", "BQ", "W"} rule = freq.rule_code if rule in end_types or ("-" in rule and rule[: rule.find("-")] in end_types): if closed is None: @@ -2301,7 +2301,7 @@ def _adjust_bin_edges( "BQ", "BA", "Q", - "A", + "Y", "W", ): # If the right end-point is on the last day of the month, roll forwards diff --git a/pandas/core/series.py b/pandas/core/series.py index fd50a85f3c2e3..8ffc97e7143ef 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5651,7 +5651,7 @@ def to_timestamp( 2023 1 2024 2 2025 3 - Freq: A-DEC, dtype: int64 + Freq: Y-DEC, dtype: int64 The resulting frequency of the Timestamps is `YearBegin` @@ -5670,7 +5670,7 @@ def to_timestamp( 2023-01-31 1 2024-01-31 2 2025-01-31 3 - Freq: A-JAN, dtype: int64 + Freq: Y-JAN, dtype: int64 """ if not isinstance(self.index, PeriodIndex): raise TypeError(f"unsupported Type {type(self.index).__name__}") @@ -5705,12 +5705,12 @@ def to_period(self, freq: str | None = None, copy: bool | None = None) -> Series 2023 1 2024 2 2025 3 - Freq: A-DEC, dtype: int64 + Freq: Y-DEC, dtype: int64 Viewing the index >>> s.index - PeriodIndex(['2023', '2024', '2025'], dtype='period[A-DEC]') + PeriodIndex(['2023', '2024', '2025'], dtype='period[Y-DEC]') """ if not isinstance(self.index, DatetimeIndex): raise TypeError(f"unsupported Type {type(self.index).__name__}") diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index ee8391830db4c..bee1e1a385672 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -286,14 +286,14 @@ def test_parr_cmp_pi_mismatched_freq(self, freq, box_with_array): msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period" with pytest.raises(TypeError, match=msg): - base <= Period("2011", freq="A") + base <= Period("2011", freq="Y") with pytest.raises(TypeError, match=msg): - Period("2011", freq="A") >= base + Period("2011", freq="Y") >= base # TODO: Could parametrize over boxes for idx? 
- idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A") - rev_msg = r"Invalid comparison between dtype=period\[A-DEC\] and PeriodArray" + idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="Y") + rev_msg = r"Invalid comparison between dtype=period\[Y-DEC\] and PeriodArray" idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg with pytest.raises(TypeError, match=idx_msg): base <= idx @@ -405,18 +405,18 @@ def test_cmp_series_period_series_mixed_freq(self): # GH#13200 base = Series( [ - Period("2011", freq="A"), + Period("2011", freq="Y"), Period("2011-02", freq="M"), - Period("2013", freq="A"), + Period("2013", freq="Y"), Period("2011-04", freq="M"), ] ) ser = Series( [ - Period("2012", freq="A"), + Period("2012", freq="Y"), Period("2011-01", freq="M"), - Period("2013", freq="A"), + Period("2013", freq="Y"), Period("2011-05", freq="M"), ] ) @@ -934,9 +934,9 @@ def test_pi_add_sub_int_array_freqn_gt1(self): def test_pi_sub_isub_offset(self): # offset # DateOffset - rng = period_range("2014", "2024", freq="A") + rng = period_range("2014", "2024", freq="Y") result = rng - pd.offsets.YearEnd(5) - expected = period_range("2009", "2019", freq="A") + expected = period_range("2009", "2019", freq="Y") tm.assert_index_equal(result, expected) rng -= pd.offsets.YearEnd(5) tm.assert_index_equal(rng, expected) @@ -1176,17 +1176,17 @@ def test_pi_sub_isub_timedeltalike_hourly(self, two_hours): def test_add_iadd_timedeltalike_annual(self): # offset # DateOffset - rng = period_range("2014", "2024", freq="A") + rng = period_range("2014", "2024", freq="Y") result = rng + pd.offsets.YearEnd(5) - expected = period_range("2019", "2029", freq="A") + expected = period_range("2019", "2029", freq="Y") tm.assert_index_equal(result, expected) rng += pd.offsets.YearEnd(5) tm.assert_index_equal(rng, expected) def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq): other = mismatched_freq - rng = period_range("2014", "2024", freq="A") - msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)" + rng = period_range("2014", "2024", freq="Y") + msg = "Input has different freq(=.+)? 
from Period.*?\\(freq=Y-DEC\\)" with pytest.raises(IncompatibleFrequency, match=msg): rng + other with pytest.raises(IncompatibleFrequency, match=msg): diff --git a/pandas/tests/arrays/categorical/test_astype.py b/pandas/tests/arrays/categorical/test_astype.py index d2f9f6dffab49..7fba150c9113f 100644 --- a/pandas/tests/arrays/categorical/test_astype.py +++ b/pandas/tests/arrays/categorical/test_astype.py @@ -32,7 +32,7 @@ def test_astype_nan_to_int(self, cls, values): [ array(["2019", "2020"], dtype="datetime64[ns, UTC]"), array([0, 0], dtype="timedelta64[ns]"), - array([Period("2019"), Period("2020")], dtype="period[A-DEC]"), + array([Period("2019"), Period("2020")], dtype="period[Y-DEC]"), array([Interval(0, 1), Interval(1, 2)], dtype="interval"), array([1, np.nan], dtype="Int64"), ], diff --git a/pandas/tests/arrays/period/test_arrow_compat.py b/pandas/tests/arrays/period/test_arrow_compat.py index 903fc3177aa84..6c04d7c603d4c 100644 --- a/pandas/tests/arrays/period/test_arrow_compat.py +++ b/pandas/tests/arrays/period/test_arrow_compat.py @@ -33,7 +33,7 @@ def test_arrow_extension_type(): "data, freq", [ (pd.date_range("2017", periods=3), "D"), - (pd.date_range("2017", periods=3, freq="A"), "A-DEC"), + (pd.date_range("2017", periods=3, freq="Y"), "Y-DEC"), ], ) def test_arrow_array(data, freq): diff --git a/pandas/tests/arrays/period/test_constructors.py b/pandas/tests/arrays/period/test_constructors.py index 0ea26a6ece7eb..d034162f1b46e 100644 --- a/pandas/tests/arrays/period/test_constructors.py +++ b/pandas/tests/arrays/period/test_constructors.py @@ -71,11 +71,11 @@ def test_from_datetime64_freq_2M(freq): "data, freq, msg", [ ( - [pd.Period("2017", "D"), pd.Period("2017", "A")], + [pd.Period("2017", "D"), pd.Period("2017", "Y")], None, "Input has different freq", ), - ([pd.Period("2017", "D")], "A", "Input has different freq"), + ([pd.Period("2017", "D")], "Y", "Input has different freq"), ], ) def test_period_array_raises(data, freq, msg): diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index 2746cd91963a0..0aeedf4d03919 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -350,7 +350,7 @@ def test_array_inference(data, expected): "data", [ # mix of frequencies - [pd.Period("2000", "D"), pd.Period("2001", "A")], + [pd.Period("2000", "D"), pd.Period("2001", "Y")], # mix of closed [pd.Interval(0, 1, closed="left"), pd.Interval(1, 2, closed="right")], # Mix of timezones diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index a105852395b3a..fc46e5a372806 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -747,7 +747,7 @@ def test_iter_zoneinfo_fold(self, tz): assert left.utcoffset() == right2.utcoffset() def test_date_range_frequency_M_deprecated(self): - depr_msg = r"\'M\' will be deprecated, please use \'ME\' for \'month end\'" + depr_msg = "'M' will be deprecated, please use 'ME' instead." 
expected = pd.date_range("1/1/2000", periods=4, freq="2ME") with tm.assert_produces_warning(UserWarning, match=depr_msg): diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py index d1e954bc2ebe2..43a80a92573c5 100644 --- a/pandas/tests/arrays/test_period.py +++ b/pandas/tests/arrays/test_period.py @@ -82,9 +82,9 @@ def test_setitem(key, value, expected): def test_setitem_raises_incompatible_freq(): arr = PeriodArray(np.arange(3), dtype="period[D]") with pytest.raises(IncompatibleFrequency, match="freq"): - arr[0] = pd.Period("2000", freq="A") + arr[0] = pd.Period("2000", freq="Y") - other = PeriodArray._from_sequence(["2000", "2001"], dtype="period[A]") + other = PeriodArray._from_sequence(["2000", "2001"], dtype="period[Y]") with pytest.raises(IncompatibleFrequency, match="freq"): arr[[0, 1]] = other diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py index db13e979a3c2d..20ee2d443f340 100644 --- a/pandas/tests/base/test_conversion.py +++ b/pandas/tests/base/test_conversion.py @@ -192,9 +192,9 @@ def test_iter_box(self): "datetime64[ns, US/Central]", ), ( - pd.PeriodIndex([2018, 2019], freq="A"), + pd.PeriodIndex([2018, 2019], freq="Y"), PeriodArray, - pd.core.dtypes.dtypes.PeriodDtype("A-DEC"), + pd.core.dtypes.dtypes.PeriodDtype("Y-DEC"), ), (pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval"), ( diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 4507857418e9e..b0ca0f4705194 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -274,7 +274,7 @@ def test_is_period_dtype(): assert not com.is_period_dtype(pd.Period("2017-01-01")) assert com.is_period_dtype(PeriodDtype(freq="D")) - assert com.is_period_dtype(pd.PeriodIndex([], freq="A")) + assert com.is_period_dtype(pd.PeriodIndex([], freq="Y")) def test_is_interval_dtype(): diff --git a/pandas/tests/frame/methods/test_asfreq.py b/pandas/tests/frame/methods/test_asfreq.py index a2f8f3e278395..0527bfb16492c 100644 --- a/pandas/tests/frame/methods/test_asfreq.py +++ b/pandas/tests/frame/methods/test_asfreq.py @@ -104,7 +104,7 @@ def test_asfreq_keep_index_name(self, frame_or_series): assert index_name == obj.asfreq("10D").index.name def test_asfreq_ts(self, frame_or_series): - index = period_range(freq="A", start="1/1/2001", end="12/31/2010") + index = period_range(freq="Y", start="1/1/2001", end="12/31/2010") obj = DataFrame( np.random.default_rng(2).standard_normal((len(index), 3)), index=index ) @@ -235,7 +235,7 @@ def test_asfreq_2ME(self, freq, freq_half): tm.assert_frame_equal(result, expected) def test_asfreq_frequency_M_deprecated(self): - depr_msg = r"\'M\' will be deprecated, please use \'ME\' for \'month end\'" + depr_msg = "'M' will be deprecated, please use 'ME' instead." 
index = date_range("1/1/2000", periods=4, freq="ME") df = DataFrame({"s": Series([0.0, 1.0, 2.0, 3.0], index=index)}) diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py index 2d4ac1d4a4444..d9796a5b25c63 100644 --- a/pandas/tests/frame/methods/test_join.py +++ b/pandas/tests/frame/methods/test_join.py @@ -22,7 +22,7 @@ def frame_with_period_index(): return DataFrame( data=np.arange(20).reshape(4, 5), columns=list("abcde"), - index=period_range(start="2000", freq="A", periods=4), + index=period_range(start="2000", freq="Y", periods=4), ) diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index 0105c41bd0eca..bba86b481eadc 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -36,7 +36,7 @@ def test_dti_set_index_reindex_datetimeindex(self): # GH#6631 df = DataFrame(np.random.default_rng(2).random(6)) idx1 = date_range("2011/01/01", periods=6, freq="ME", tz="US/Eastern") - idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo") + idx2 = date_range("2013", periods=6, freq="Y", tz="Asia/Tokyo") df = df.set_index(idx1) tm.assert_index_equal(df.index, idx1) diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py index 5984e591dd6c1..f755ef0c2763d 100644 --- a/pandas/tests/frame/methods/test_set_index.py +++ b/pandas/tests/frame/methods/test_set_index.py @@ -493,7 +493,7 @@ def test_set_index_period(self): idx1 = idx1.append(idx1) idx2 = period_range("2013-01-01 09:00", periods=2, freq="H") idx2 = idx2.append(idx2).append(idx2) - idx3 = period_range("2005", periods=6, freq="A") + idx3 = period_range("2005", periods=6, freq="Y") df = df.set_index(idx1) df = df.set_index(idx2, append=True) @@ -694,7 +694,7 @@ def test_set_index_periodindex(self): # GH#6631 df = DataFrame(np.random.default_rng(2).random(6)) idx1 = period_range("2011/01/01", periods=6, freq="M") - idx2 = period_range("2013", periods=6, freq="A") + idx2 = period_range("2013", periods=6, freq="Y") df = df.set_index(idx1) tm.assert_index_equal(df.index, idx1) diff --git a/pandas/tests/frame/methods/test_to_timestamp.py b/pandas/tests/frame/methods/test_to_timestamp.py index e72b576fca833..478708ce90488 100644 --- a/pandas/tests/frame/methods/test_to_timestamp.py +++ b/pandas/tests/frame/methods/test_to_timestamp.py @@ -16,7 +16,7 @@ import pandas._testing as tm -def _get_with_delta(delta, freq="A-DEC"): +def _get_with_delta(delta, freq="Y-DEC"): return date_range( to_datetime("1/1/2001") + delta, to_datetime("12/31/2009") + delta, @@ -27,7 +27,7 @@ def _get_with_delta(delta, freq="A-DEC"): class TestToTimestamp: def test_to_timestamp(self, frame_or_series): K = 5 - index = period_range(freq="A", start="1/1/2001", end="12/1/2009") + index = period_range(freq="Y", start="1/1/2001", end="12/1/2009") obj = DataFrame( np.random.default_rng(2).standard_normal((len(index), K)), index=index, @@ -36,7 +36,7 @@ def test_to_timestamp(self, frame_or_series): obj["mix"] = "a" obj = tm.get_obj(obj, frame_or_series) - exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC") + exp_index = date_range("1/1/2001", end="12/31/2009", freq="Y-DEC") exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") result = obj.to_timestamp("D", "end") tm.assert_index_equal(result.index, exp_index) @@ -71,7 +71,7 @@ def test_to_timestamp(self, frame_or_series): def test_to_timestamp_columns(self): K = 5 - index = period_range(freq="A", 
start="1/1/2001", end="12/1/2009") + index = period_range(freq="Y", start="1/1/2001", end="12/1/2009") df = DataFrame( np.random.default_rng(2).standard_normal((len(index), K)), index=index, @@ -82,7 +82,7 @@ def test_to_timestamp_columns(self): # columns df = df.T - exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC") + exp_index = date_range("1/1/2001", end="12/31/2009", freq="Y-DEC") exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") result = df.to_timestamp("D", "end", axis=1) tm.assert_index_equal(result.columns, exp_index) @@ -122,7 +122,7 @@ def test_to_timestamp_columns(self): assert result2.columns.freqstr == "AS-JAN" def test_to_timestamp_invalid_axis(self): - index = period_range(freq="A", start="1/1/2001", end="12/1/2009") + index = period_range(freq="Y", start="1/1/2001", end="12/1/2009") obj = DataFrame( np.random.default_rng(2).standard_normal((len(index), 5)), index=index ) diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index bb9a76829c77d..2c3b732fe7196 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1236,7 +1236,7 @@ def test_frame_add_tz_mismatch_converts_to_utc(self): assert result.index.tz is timezone.utc def test_align_frame(self): - rng = pd.period_range("1/1/2000", "1/1/2010", freq="A") + rng = pd.period_range("1/1/2000", "1/1/2010", freq="Y") ts = DataFrame( np.random.default_rng(2).standard_normal((len(rng), 3)), index=rng ) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index e66557f132c1d..c6eb2c6b047f4 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -908,7 +908,7 @@ def test_mean_datetimelike(self): "A": np.arange(3), "B": date_range("2016-01-01", periods=3), "C": pd.timedelta_range("1D", periods=3), - "D": pd.period_range("2016", periods=3, freq="A"), + "D": pd.period_range("2016", periods=3, freq="Y"), } ) result = df.mean(numeric_only=True) @@ -933,7 +933,7 @@ def test_mean_datetimelike_numeric_only_false(self): tm.assert_series_equal(result, expected) # mean of period is not allowed - df["D"] = pd.period_range("2016", periods=3, freq="A") + df["D"] = pd.period_range("2016", periods=3, freq="Y") with pytest.raises(TypeError, match="mean is not implemented for Period"): df.mean(numeric_only=False) diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 64d516e484991..55c239f7284c1 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -339,7 +339,7 @@ def test_repr_np_nat_with_object(self, arg, box, expected): assert result == expected def test_frame_datetime64_pre1900_repr(self): - df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")}) + df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="Y-DEC")}) # it works! 
repr(df) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index a9e67df1fb793..a3dc9e3087c7b 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -193,7 +193,7 @@ def test_timegrouper_with_reg_groups(self): ).set_index(["Date", "Buyer"]) msg = "The default value of numeric_only" - result = df.groupby([Grouper(freq="A"), "Buyer"]).sum(numeric_only=True) + result = df.groupby([Grouper(freq="Y"), "Buyer"]).sum(numeric_only=True) tm.assert_frame_equal(result, expected) expected = DataFrame( @@ -336,7 +336,7 @@ def test_timegrouper_with_reg_groups(self): ) tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize("freq", ["D", "ME", "A", "Q-APR"]) + @pytest.mark.parametrize("freq", ["D", "ME", "Y", "Q-APR"]) def test_timegrouper_with_reg_groups_freq(self, freq): # GH 6764 multiple grouping with/without sort df = DataFrame( diff --git a/pandas/tests/indexes/datetimelike_/test_sort_values.py b/pandas/tests/indexes/datetimelike_/test_sort_values.py index ab1c15f003d4d..cf919bfa29d10 100644 --- a/pandas/tests/indexes/datetimelike_/test_sort_values.py +++ b/pandas/tests/indexes/datetimelike_/test_sort_values.py @@ -127,7 +127,7 @@ def test_sort_values_with_freq_periodindex(self, freq): @pytest.mark.parametrize( "idx", [ - PeriodIndex(["2011", "2012", "2013"], name="pidx", freq="A"), + PeriodIndex(["2011", "2012", "2013"], name="pidx", freq="Y"), Index([2011, 2012, 2013], name="idx"), # for compatibility check ], ) @@ -275,10 +275,10 @@ def test_sort_values_without_freq_datetimeindex( ), ( PeriodIndex( - ["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="A" + ["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="Y" ), PeriodIndex( - ["2011", "2011", "2012", "2013", "2015"], name="pidx", freq="A" + ["2011", "2011", "2012", "2013", "2015"], name="pidx", freq="Y" ), ), ( @@ -308,7 +308,7 @@ def test_sort_values_without_freq_periodindex_nat(self): def test_order_stability_compat(): # GH#35922. 
sort_values is stable both for normal and datetime-like Index - pidx = PeriodIndex(["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="A") + pidx = PeriodIndex(["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="Y") iidx = Index([2011, 2013, 2015, 2012, 2011], name="idx") ordered1, indexer1 = pidx.sort_values(return_indexer=True, ascending=False) ordered2, indexer2 = iidx.sort_values(return_indexer=True, ascending=False) diff --git a/pandas/tests/indexes/datetimes/methods/test_to_period.py b/pandas/tests/indexes/datetimes/methods/test_to_period.py index 5f266ea0b42a6..7712a4166329c 100644 --- a/pandas/tests/indexes/datetimes/methods/test_to_period.py +++ b/pandas/tests/indexes/datetimes/methods/test_to_period.py @@ -60,7 +60,7 @@ def test_to_period_quarterlyish(self, off): def test_to_period_annualish(self, off): rng = date_range("01-Jan-2012", periods=8, freq=off) prng = rng.to_period() - assert prng.freq == "A-DEC" + assert prng.freq == "Y-DEC" def test_to_period_monthish(self): offsets = ["MS", "BM"] diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index 61c8cc4a50fe2..7dee58e63fa88 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -753,7 +753,7 @@ def test_constructor_invalid_dtype_raises(self, dtype): DatetimeIndex([1, 2], dtype=dtype) def test_constructor_name(self): - idx = date_range(start="2000-01-01", periods=1, freq="A", name="TEST") + idx = date_range(start="2000-01-01", periods=1, freq="Y", name="TEST") assert idx.name == "TEST" def test_000constructor_resolution(self): @@ -978,8 +978,8 @@ def test_dti_constructor_years_only(self, tz_naive_fixture): rng2 = date_range("2014", "2015", freq="MS", tz=tz) expected2 = date_range("2014-01-01", "2015-01-01", freq="MS", tz=tz) - rng3 = date_range("2014", "2020", freq="A", tz=tz) - expected3 = date_range("2014-12-31", "2019-12-31", freq="A", tz=tz) + rng3 = date_range("2014", "2020", freq="Y", tz=tz) + expected3 = date_range("2014-12-31", "2019-12-31", freq="Y", tz=tz) rng4 = date_range("2014", "2020", freq="AS", tz=tz) expected4 = date_range("2014-01-01", "2020-01-01", freq="AS", tz=tz) @@ -1036,7 +1036,7 @@ def test_constructor_int64_nocopy(self): assert (index.asi8[50:100] != -1).all() @pytest.mark.parametrize( - "freq", ["ME", "Q", "A", "D", "B", "BH", "min", "s", "ms", "us", "H", "ns", "C"] + "freq", ["ME", "Q", "Y", "D", "B", "BH", "min", "s", "ms", "us", "H", "ns", "C"] ) def test_from_freq_recreate_from_data(self, freq): org = date_range(start="2001/02/01 09:00", freq=freq, periods=1) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index b93aee1d988de..f0996d7af917d 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -7,6 +7,7 @@ time, timedelta, ) +import re import numpy as np import pytest @@ -252,12 +253,11 @@ def test_begin_year_alias(self, freq): ) tm.assert_index_equal(rng, exp) - @pytest.mark.parametrize("freq", ["A", "Y"]) - def test_end_year_alias(self, freq): + def test_end_year_alias(self): # see gh-9313 - rng = date_range("1/1/2013", "7/1/2017", freq=freq) + rng = date_range("1/1/2013", "7/1/2017", freq="Y") exp = DatetimeIndex( - ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq + ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq="Y" ) tm.assert_index_equal(rng, exp) @@ -272,10 +272,10 @@ 
def test_business_end_year_alias(self, freq): def test_date_range_negative_freq(self): # GH 11018 - rng = date_range("2011-12-31", freq="-2A", periods=3) - exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A") + rng = date_range("2011-12-31", freq="-2Y", periods=3) + exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2Y") tm.assert_index_equal(rng, exp) - assert rng.freq == "-2A" + assert rng.freq == "-2Y" rng = date_range("2011-01-31", freq="-2ME", periods=3) exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2ME") @@ -638,7 +638,7 @@ def test_range_tz_dateutil(self): assert dr[0] == start assert dr[2] == end - @pytest.mark.parametrize("freq", ["1D", "3D", "2ME", "7W", "3H", "A"]) + @pytest.mark.parametrize("freq", ["1D", "3D", "2ME", "7W", "3H", "Y"]) def test_range_closed(self, freq, inclusive_endpoints_fixture): begin = datetime(2011, 1, 1) end = datetime(2014, 1, 1) @@ -653,7 +653,7 @@ def test_range_closed(self, freq, inclusive_endpoints_fixture): tm.assert_index_equal(expected_range, result_range) - @pytest.mark.parametrize("freq", ["1D", "3D", "2ME", "7W", "3H", "A"]) + @pytest.mark.parametrize("freq", ["1D", "3D", "2ME", "7W", "3H", "Y"]) def test_range_closed_with_tz_aware_start_end( self, freq, inclusive_endpoints_fixture ): @@ -674,7 +674,7 @@ def test_range_closed_with_tz_aware_start_end( tm.assert_index_equal(expected_range, result_range) - @pytest.mark.parametrize("freq", ["1D", "3D", "2ME", "7W", "3H", "A"]) + @pytest.mark.parametrize("freq", ["1D", "3D", "2ME", "7W", "3H", "Y"]) def test_range_with_tz_closed_with_tz_aware_start_end( self, freq, inclusive_endpoints_fixture ): @@ -839,20 +839,23 @@ def test_freq_dateoffset_with_relateivedelta_nanos(self): @pytest.mark.parametrize( "freq,freq_depr", [ - ("min", "T"), - ("s", "S"), - ("ms", "L"), - ("us", "U"), - ("ns", "N"), + ("2Y", "2A"), + ("200Y-MAY", "200A-MAY"), + ("2min", "2T"), + ("1s", "1S"), + ("2ms", "2L"), + ("1us", "1U"), + ("2ns", "2N"), ], ) - def test_frequencies_T_S_L_U_N_deprecated(self, freq, freq_depr): + def test_frequencies_A_T_S_L_U_N_deprecated(self, freq, freq_depr): # GH#52536 - msg = f"'{freq_depr}' is deprecated and will be removed in a future version." + freq_msg = re.split("[0-9]*", freq_depr, maxsplit=1)[1] + msg = f"'{freq_msg}' is deprecated and will be removed in a future version." 
- expected = date_range("1/1/2000", periods=4, freq=freq) + expected = date_range("1/1/2000", periods=2, freq=freq) with tm.assert_produces_warning(FutureWarning, match=msg): - result = date_range("1/1/2000", periods=4, freq=freq_depr) + result = date_range("1/1/2000", periods=2, freq=freq_depr) tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 21dc22bea87dc..ac6d0a97956e4 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -20,7 +20,7 @@ class TestDatetimeIndexOps: @pytest.mark.parametrize( "freq,expected", [ - ("A", "day"), + ("Y", "day"), ("Q", "day"), ("ME", "day"), ("D", "day"), @@ -33,7 +33,7 @@ class TestDatetimeIndexOps: ) def test_resolution(self, request, tz_naive_fixture, freq, expected): tz = tz_naive_fixture - if freq == "A" and not IS64 and isinstance(tz, tzlocal): + if freq == "Y" and not IS64 and isinstance(tz, tzlocal): request.node.add_marker( pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038") ) diff --git a/pandas/tests/indexes/period/methods/test_asfreq.py b/pandas/tests/indexes/period/methods/test_asfreq.py index 89ea4fb6472d0..f9838ce272296 100644 --- a/pandas/tests/indexes/period/methods/test_asfreq.py +++ b/pandas/tests/indexes/period/methods/test_asfreq.py @@ -10,7 +10,7 @@ class TestPeriodIndex: def test_asfreq(self): - pi1 = period_range(freq="A", start="1/1/2001", end="1/1/2001") + pi1 = period_range(freq="Y", start="1/1/2001", end="1/1/2001") pi2 = period_range(freq="Q", start="1/1/2001", end="1/1/2001") pi3 = period_range(freq="M", start="1/1/2001", end="1/1/2001") pi4 = period_range(freq="D", start="1/1/2001", end="1/1/2001") @@ -26,42 +26,42 @@ def test_asfreq(self): assert pi1.asfreq("Min", "s") == pi6 assert pi1.asfreq("s", "s") == pi7 - assert pi2.asfreq("A", "s") == pi1 + assert pi2.asfreq("Y", "s") == pi1 assert pi2.asfreq("M", "s") == pi3 assert pi2.asfreq("D", "s") == pi4 assert pi2.asfreq("H", "s") == pi5 assert pi2.asfreq("Min", "s") == pi6 assert pi2.asfreq("s", "s") == pi7 - assert pi3.asfreq("A", "s") == pi1 + assert pi3.asfreq("Y", "s") == pi1 assert pi3.asfreq("Q", "s") == pi2 assert pi3.asfreq("D", "s") == pi4 assert pi3.asfreq("H", "s") == pi5 assert pi3.asfreq("Min", "s") == pi6 assert pi3.asfreq("s", "s") == pi7 - assert pi4.asfreq("A", "s") == pi1 + assert pi4.asfreq("Y", "s") == pi1 assert pi4.asfreq("Q", "s") == pi2 assert pi4.asfreq("M", "s") == pi3 assert pi4.asfreq("H", "s") == pi5 assert pi4.asfreq("Min", "s") == pi6 assert pi4.asfreq("s", "s") == pi7 - assert pi5.asfreq("A", "s") == pi1 + assert pi5.asfreq("Y", "s") == pi1 assert pi5.asfreq("Q", "s") == pi2 assert pi5.asfreq("M", "s") == pi3 assert pi5.asfreq("D", "s") == pi4 assert pi5.asfreq("Min", "s") == pi6 assert pi5.asfreq("s", "s") == pi7 - assert pi6.asfreq("A", "s") == pi1 + assert pi6.asfreq("Y", "s") == pi1 assert pi6.asfreq("Q", "s") == pi2 assert pi6.asfreq("M", "s") == pi3 assert pi6.asfreq("D", "s") == pi4 assert pi6.asfreq("H", "s") == pi5 assert pi6.asfreq("s", "s") == pi7 - assert pi7.asfreq("A", "s") == pi1 + assert pi7.asfreq("Y", "s") == pi1 assert pi7.asfreq("Q", "s") == pi2 assert pi7.asfreq("M", "s") == pi3 assert pi7.asfreq("D", "s") == pi4 diff --git a/pandas/tests/indexes/period/methods/test_astype.py b/pandas/tests/indexes/period/methods/test_astype.py index e54cd73a35f59..07595b6b8c1dd 100644 --- a/pandas/tests/indexes/period/methods/test_astype.py +++ 
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 21dc22bea87dc..ac6d0a97956e4 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -20,7 +20,7 @@ class TestDatetimeIndexOps:
     @pytest.mark.parametrize(
         "freq,expected",
         [
-            ("A", "day"),
+            ("Y", "day"),
             ("Q", "day"),
             ("ME", "day"),
             ("D", "day"),
@@ -33,7 +33,7 @@ class TestDatetimeIndexOps:
     )
     def test_resolution(self, request, tz_naive_fixture, freq, expected):
         tz = tz_naive_fixture
-        if freq == "A" and not IS64 and isinstance(tz, tzlocal):
+        if freq == "Y" and not IS64 and isinstance(tz, tzlocal):
             request.node.add_marker(
                 pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038")
             )
diff --git a/pandas/tests/indexes/period/methods/test_asfreq.py b/pandas/tests/indexes/period/methods/test_asfreq.py
index 89ea4fb6472d0..f9838ce272296 100644
--- a/pandas/tests/indexes/period/methods/test_asfreq.py
+++ b/pandas/tests/indexes/period/methods/test_asfreq.py
@@ -10,7 +10,7 @@ class TestPeriodIndex:
     def test_asfreq(self):
-        pi1 = period_range(freq="A", start="1/1/2001", end="1/1/2001")
+        pi1 = period_range(freq="Y", start="1/1/2001", end="1/1/2001")
         pi2 = period_range(freq="Q", start="1/1/2001", end="1/1/2001")
         pi3 = period_range(freq="M", start="1/1/2001", end="1/1/2001")
         pi4 = period_range(freq="D", start="1/1/2001", end="1/1/2001")
@@ -26,42 +26,42 @@ def test_asfreq(self):
         assert pi1.asfreq("Min", "s") == pi6
         assert pi1.asfreq("s", "s") == pi7
 
-        assert pi2.asfreq("A", "s") == pi1
+        assert pi2.asfreq("Y", "s") == pi1
         assert pi2.asfreq("M", "s") == pi3
         assert pi2.asfreq("D", "s") == pi4
         assert pi2.asfreq("H", "s") == pi5
         assert pi2.asfreq("Min", "s") == pi6
         assert pi2.asfreq("s", "s") == pi7
 
-        assert pi3.asfreq("A", "s") == pi1
+        assert pi3.asfreq("Y", "s") == pi1
         assert pi3.asfreq("Q", "s") == pi2
         assert pi3.asfreq("D", "s") == pi4
         assert pi3.asfreq("H", "s") == pi5
         assert pi3.asfreq("Min", "s") == pi6
         assert pi3.asfreq("s", "s") == pi7
 
-        assert pi4.asfreq("A", "s") == pi1
+        assert pi4.asfreq("Y", "s") == pi1
         assert pi4.asfreq("Q", "s") == pi2
         assert pi4.asfreq("M", "s") == pi3
         assert pi4.asfreq("H", "s") == pi5
         assert pi4.asfreq("Min", "s") == pi6
         assert pi4.asfreq("s", "s") == pi7
 
-        assert pi5.asfreq("A", "s") == pi1
+        assert pi5.asfreq("Y", "s") == pi1
         assert pi5.asfreq("Q", "s") == pi2
         assert pi5.asfreq("M", "s") == pi3
         assert pi5.asfreq("D", "s") == pi4
         assert pi5.asfreq("Min", "s") == pi6
         assert pi5.asfreq("s", "s") == pi7
 
-        assert pi6.asfreq("A", "s") == pi1
+        assert pi6.asfreq("Y", "s") == pi1
         assert pi6.asfreq("Q", "s") == pi2
         assert pi6.asfreq("M", "s") == pi3
         assert pi6.asfreq("D", "s") == pi4
         assert pi6.asfreq("H", "s") == pi5
         assert pi6.asfreq("s", "s") == pi7
 
-        assert pi7.asfreq("A", "s") == pi1
+        assert pi7.asfreq("Y", "s") == pi1
         assert pi7.asfreq("Q", "s") == pi2
         assert pi7.asfreq("M", "s") == pi3
         assert pi7.asfreq("D", "s") == pi4
diff --git a/pandas/tests/indexes/period/methods/test_astype.py b/pandas/tests/indexes/period/methods/test_astype.py
index e54cd73a35f59..07595b6b8c1dd 100644
--- a/pandas/tests/indexes/period/methods/test_astype.py
+++ b/pandas/tests/indexes/period/methods/test_astype.py
@@ -44,7 +44,7 @@ def test_astype_conversion(self):
         expected = Index([str(x) for x in idx], name="idx")
         tm.assert_index_equal(result, expected)
 
-        idx = period_range("1990", "2009", freq="A", name="idx")
+        idx = period_range("1990", "2009", freq="Y", name="idx")
         result = idx.astype("i8")
         tm.assert_index_equal(result, Index(idx.asi8, name="idx"))
         tm.assert_numpy_array_equal(result.values, idx.asi8)
diff --git a/pandas/tests/indexes/period/methods/test_is_full.py b/pandas/tests/indexes/period/methods/test_is_full.py
index 490f199a59ed7..b4105bedbe21d 100644
--- a/pandas/tests/indexes/period/methods/test_is_full.py
+++ b/pandas/tests/indexes/period/methods/test_is_full.py
@@ -4,19 +4,19 @@
 
 def test_is_full():
-    index = PeriodIndex([2005, 2007, 2009], freq="A")
+    index = PeriodIndex([2005, 2007, 2009], freq="Y")
     assert not index.is_full
 
-    index = PeriodIndex([2005, 2006, 2007], freq="A")
+    index = PeriodIndex([2005, 2006, 2007], freq="Y")
     assert index.is_full
 
-    index = PeriodIndex([2005, 2005, 2007], freq="A")
+    index = PeriodIndex([2005, 2005, 2007], freq="Y")
     assert not index.is_full
 
-    index = PeriodIndex([2005, 2005, 2006], freq="A")
+    index = PeriodIndex([2005, 2005, 2006], freq="Y")
     assert index.is_full
 
-    index = PeriodIndex([2006, 2005, 2005], freq="A")
+    index = PeriodIndex([2006, 2005, 2005], freq="Y")
     with pytest.raises(ValueError, match="Index is not monotonic"):
         index.is_full
diff --git a/pandas/tests/indexes/period/methods/test_shift.py b/pandas/tests/indexes/period/methods/test_shift.py
index 48dc5f0e64d08..d649dd3da0864 100644
--- a/pandas/tests/indexes/period/methods/test_shift.py
+++ b/pandas/tests/indexes/period/methods/test_shift.py
@@ -29,16 +29,16 @@ def test_pi_shift_ndarray(self):
         tm.assert_index_equal(result, expected)
 
     def test_shift(self):
-        pi1 = period_range(freq="A", start="1/1/2001", end="12/1/2009")
-        pi2 = period_range(freq="A", start="1/1/2002", end="12/1/2010")
+        pi1 = period_range(freq="Y", start="1/1/2001", end="12/1/2009")
+        pi2 = period_range(freq="Y", start="1/1/2002", end="12/1/2010")
 
         tm.assert_index_equal(pi1.shift(0), pi1)
 
         assert len(pi1) == len(pi2)
         tm.assert_index_equal(pi1.shift(1), pi2)
 
-        pi1 = period_range(freq="A", start="1/1/2001", end="12/1/2009")
-        pi2 = period_range(freq="A", start="1/1/2000", end="12/1/2008")
+        pi1 = period_range(freq="Y", start="1/1/2001", end="12/1/2009")
+        pi2 = period_range(freq="Y", start="1/1/2000", end="12/1/2008")
 
         assert len(pi1) == len(pi2)
         tm.assert_index_equal(pi1.shift(-1), pi2)
@@ -117,6 +117,6 @@ def test_shift_gh8083(self):
 
     def test_shift_periods(self):
         # GH #22458 : argument 'n' was deprecated in favor of 'periods'
-        idx = period_range(freq="A", start="1/1/2001", end="12/1/2009")
+        idx = period_range(freq="Y", start="1/1/2001", end="12/1/2009")
         tm.assert_index_equal(idx.shift(periods=0), idx)
         tm.assert_index_equal(idx.shift(0), idx)
diff --git a/pandas/tests/indexes/period/methods/test_to_timestamp.py b/pandas/tests/indexes/period/methods/test_to_timestamp.py
index 8bb0c3518c835..462f66eef7269 100644
--- a/pandas/tests/indexes/period/methods/test_to_timestamp.py
+++ b/pandas/tests/indexes/period/methods/test_to_timestamp.py
@@ -47,7 +47,7 @@ def test_to_timestamp_non_contiguous(self):
         tm.assert_datetime_array_equal(result, expected, check_freq=False)
 
     def test_to_timestamp_freq(self):
-        idx = period_range("2017", periods=12, freq="A-DEC")
+        idx = period_range("2017", periods=12, freq="Y-DEC")
         result = idx.to_timestamp()
         expected = date_range("2017", periods=12, freq="AS-JAN")
         tm.assert_index_equal(result, expected)
@@ -72,12 +72,12 @@ def test_to_timestamp_pi_nat(self):
         tm.assert_index_equal(result3, exp)
         assert result3.freqstr == "3M"
 
-        msg = "Frequency must be positive, because it represents span: -2A"
+        msg = "Frequency must be positive, because it represents span: -2Y"
         with pytest.raises(ValueError, match=msg):
-            result.to_period(freq="-2A")
+            result.to_period(freq="-2Y")
 
     def test_to_timestamp_preserve_name(self):
-        index = period_range(freq="A", start="1/1/2001", end="12/1/2009", name="foo")
+        index = period_range(freq="Y", start="1/1/2001", end="12/1/2009", name="foo")
         assert index.name == "foo"
 
         conv = index.to_timestamp("D")
freq="D") idx3 = PeriodIndex(["2011-01-01", "2011-01-02"], freq="D") idx4 = PeriodIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D") - idx5 = PeriodIndex(["2011", "2012", "2013"], freq="A") + idx5 = PeriodIndex(["2011", "2012", "2013"], freq="Y") idx6 = PeriodIndex(["2011-01-01 09:00", "2012-02-01 10:00", "NaT"], freq="H") idx7 = pd.period_range("2013Q1", periods=1, freq="Q") @@ -125,7 +125,7 @@ def test_representation_to_series(self): exp5 = """0 2011 1 2012 2 2013 -dtype: period[A-DEC]""" +dtype: period[Y-DEC]""" exp6 = """0 2011-01-01 09:00 1 2012-02-01 10:00 @@ -157,7 +157,7 @@ def test_summary(self): idx2 = PeriodIndex(["2011-01-01"], freq="D") idx3 = PeriodIndex(["2011-01-01", "2011-01-02"], freq="D") idx4 = PeriodIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D") - idx5 = PeriodIndex(["2011", "2012", "2013"], freq="A") + idx5 = PeriodIndex(["2011", "2012", "2013"], freq="Y") idx6 = PeriodIndex(["2011-01-01 09:00", "2012-02-01 10:00", "NaT"], freq="H") idx7 = pd.period_range("2013Q1", periods=1, freq="Q") @@ -177,7 +177,7 @@ def test_summary(self): Freq: D""" exp5 = """PeriodIndex: 3 entries, 2011 to 2013 -Freq: A-DEC""" +Freq: Y-DEC""" exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT Freq: H""" diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 109a4a41e2841..f5af550d94ab1 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -238,9 +238,9 @@ def test_getitem_day(self, idx_range): class TestGetLoc: def test_get_loc_msg(self): - idx = period_range("2000-1-1", freq="A", periods=10) - bad_period = Period("2012", "A") - with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"): + idx = period_range("2000-1-1", freq="Y", periods=10) + bad_period = Period("2012", "Y") + with pytest.raises(KeyError, match=r"^Period\('2012', 'Y-DEC'\)$"): idx.get_loc(bad_period) try: diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py index 3a272f53091b5..5bc76340badaf 100644 --- a/pandas/tests/indexes/period/test_partial_slicing.py +++ b/pandas/tests/indexes/period/test_partial_slicing.py @@ -14,7 +14,7 @@ class TestPeriodIndex: def test_getitem_periodindex_duplicates_string_slice(self, using_copy_on_write): # monotonic - idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN") + idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="Y-JUN") ts = Series(np.random.default_rng(2).standard_normal(len(idx)), index=idx) original = ts.copy() @@ -28,7 +28,7 @@ def test_getitem_periodindex_duplicates_string_slice(self, using_copy_on_write): assert (ts[1:3] == 1).all() # not monotonic - idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq="A-JUN") + idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq="Y-JUN") ts = Series(np.random.default_rng(2).standard_normal(len(idx)), index=idx) result = ts["2007"] diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index bd40fa37897d8..0c445b4cdf770 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -18,7 +18,7 @@ class TestPeriodIndex: def test_make_time_series(self): - index = period_range(freq="A", start="1/1/2001", end="12/1/2009") + index = period_range(freq="Y", start="1/1/2001", end="12/1/2009") series = Series(1, index=index) assert isinstance(series, Series) @@ -67,7 +67,7 @@ def test_values(self): 
tm.assert_numpy_array_equal(idx.asi8, exp) def test_period_index_length(self): - pi = period_range(freq="A", start="1/1/2001", end="12/1/2009") + pi = period_range(freq="Y", start="1/1/2001", end="12/1/2009") assert len(pi) == 9 pi = period_range(freq="Q", start="1/1/2001", end="12/1/2009") @@ -157,7 +157,7 @@ def test_period_index_length(self): @pytest.mark.parametrize( "periodindex", [ - period_range(freq="A", start="1/1/2001", end="12/1/2005"), + period_range(freq="Y", start="1/1/2001", end="12/1/2005"), period_range(freq="Q", start="1/1/2001", end="12/1/2002"), period_range(freq="M", start="1/1/2001", end="1/1/2002"), period_range(freq="D", start="12/1/2001", end="6/1/2001"), @@ -187,7 +187,7 @@ def test_fields(self, periodindex, field): assert getattr(x, field) == val def test_is_(self): - create_index = lambda: period_range(freq="A", start="1/1/2001", end="12/1/2009") + create_index = lambda: period_range(freq="Y", start="1/1/2001", end="12/1/2009") index = create_index() assert index.is_(index) assert not index.is_(create_index()) @@ -199,23 +199,23 @@ def test_is_(self): assert ind2.is_(index) assert not index.is_(index[:]) assert not index.is_(index.asfreq("M")) - assert not index.is_(index.asfreq("A")) + assert not index.is_(index.asfreq("Y")) assert not index.is_(index - 2) assert not index.is_(index - 0) def test_index_unique(self): - idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN") - expected = PeriodIndex([2000, 2007, 2009], freq="A-JUN") + idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="Y-JUN") + expected = PeriodIndex([2000, 2007, 2009], freq="Y-JUN") tm.assert_index_equal(idx.unique(), expected) assert idx.nunique() == 3 def test_negative_ordinals(self): - Period(ordinal=-1000, freq="A") - Period(ordinal=0, freq="A") + Period(ordinal=-1000, freq="Y") + Period(ordinal=0, freq="Y") - idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq="A") - idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq="A") + idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq="Y") + idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq="Y") tm.assert_index_equal(idx1, idx2) def test_pindex_fieldaccessor_nat(self): @@ -267,14 +267,14 @@ def test_with_multi_index(self): def test_map(self): # test_map_dictlike generally tests - index = PeriodIndex([2005, 2007, 2009], freq="A") + index = PeriodIndex([2005, 2007, 2009], freq="Y") result = index.map(lambda x: x.ordinal) exp = Index([x.ordinal for x in index]) tm.assert_index_equal(result, exp) def test_format_empty(self): # GH35712 - empty_idx = PeriodIndex([], freq="A") + empty_idx = PeriodIndex([], freq="Y") assert empty_idx.format() == [] assert empty_idx.format(name=True) == [""] @@ -284,6 +284,17 @@ def test_period_index_frequency_ME_error_message(self): with pytest.raises(ValueError, match=msg): PeriodIndex(["2020-01-01", "2020-01-02"], freq="2ME") + @pytest.mark.parametrize("freq", ["2A", "A-DEC", "200A-AUG"]) + def test_a_deprecated_from_time_series(self, freq): + # GH#52536 + freq_msg = freq[freq.index("A") :] + msg = f"'{freq_msg}' is deprecated and will be removed in a future version." 
+ + with tm.assert_produces_warning(FutureWarning, match=msg): + index = period_range(freq=freq, start="1/1/2001", end="12/1/2009") + series = Series(1, index=index) + assert isinstance(series, Series) + def test_maybe_convert_timedelta(): pi = PeriodIndex(["2000", "2001"], freq="D") diff --git a/pandas/tests/indexes/period/test_period_range.py b/pandas/tests/indexes/period/test_period_range.py index 63acaba2d4f3e..bee8a1282d08b 100644 --- a/pandas/tests/indexes/period/test_period_range.py +++ b/pandas/tests/indexes/period/test_period_range.py @@ -20,7 +20,7 @@ def test_required_arguments(self): with pytest.raises(ValueError, match=msg): period_range("2011-1-1", "2012-1-1", "B") - @pytest.mark.parametrize("freq", ["D", "W", "Q", "A"]) + @pytest.mark.parametrize("freq", ["D", "W", "Q", "Y"]) def test_construction_from_string(self, freq): # non-empty expected = date_range( diff --git a/pandas/tests/indexes/period/test_pickle.py b/pandas/tests/indexes/period/test_pickle.py index cb981ab10064f..7d359fdabb6f1 100644 --- a/pandas/tests/indexes/period/test_pickle.py +++ b/pandas/tests/indexes/period/test_pickle.py @@ -12,7 +12,7 @@ class TestPickle: - @pytest.mark.parametrize("freq", ["D", "M", "A"]) + @pytest.mark.parametrize("freq", ["D", "M", "Y"]) def test_pickle_round_trip(self, freq): idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.nan], freq=freq) result = tm.round_trip_pickle(idx) diff --git a/pandas/tests/indexes/period/test_resolution.py b/pandas/tests/indexes/period/test_resolution.py index 6c876b4f9366f..98ccfe6569798 100644 --- a/pandas/tests/indexes/period/test_resolution.py +++ b/pandas/tests/indexes/period/test_resolution.py @@ -7,7 +7,7 @@ class TestResolution: @pytest.mark.parametrize( "freq,expected", [ - ("A", "year"), + ("Y", "year"), ("Q", "quarter"), ("M", "month"), ("D", "day"), diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py index dd05210e417b0..9610db5f0336b 100644 --- a/pandas/tests/indexes/period/test_setops.py +++ b/pandas/tests/indexes/period/test_setops.py @@ -81,8 +81,8 @@ def test_union(self, sort): other6 = period_range("2000-04-01", freq="M", periods=7) expected6 = period_range("2000-01-01", freq="M", periods=10) - rng7 = period_range("2003-01-01", freq="A", periods=5) - other7 = period_range("1998-01-01", freq="A", periods=8) + rng7 = period_range("2003-01-01", freq="Y", periods=5) + other7 = period_range("1998-01-01", freq="Y", periods=8) expected7 = PeriodIndex( [ "2003", @@ -96,7 +96,7 @@ def test_union(self, sort): "2001", "2002", ], - freq="A", + freq="Y", ) rng8 = PeriodIndex( @@ -293,9 +293,9 @@ def test_difference(self, sort): expected6 = PeriodIndex(["2000-02-01", "2000-01-01", "2000-03-01"], freq="M") period_rng = ["2003", "2007", "2006", "2005", "2004"] - rng7 = PeriodIndex(period_rng, freq="A") - other7 = period_range("1998-01-01", freq="A", periods=8) - expected7 = PeriodIndex(["2007", "2006"], freq="A") + rng7 = PeriodIndex(period_rng, freq="Y") + other7 = period_range("1998-01-01", freq="Y", periods=8) + expected7 = PeriodIndex(["2007", "2006"], freq="Y") for rng, other, expected in [ (rng1, other1, expected1), diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index 18668fd357fd8..2a9149844a353 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -27,7 +27,7 @@ class TestPeriodRepresentation: ("us", "1970-01-01"), ("ns", "1970-01-01"), ("M", "1970-01"), - ("A", 1970), + ("Y", 1970), ], ) 
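
Illustration, not part of the patch: what the new `test_a_deprecated_from_time_series` above asserts. Annual aliases spelled with "A" (here "A-DEC") still build the same `PeriodIndex`, but warn:

```python
import pandas as pd
import pandas._testing as tm  # internal helper, used here only to mirror the test

msg = "'A-DEC' is deprecated and will be removed in a future version."
with tm.assert_produces_warning(FutureWarning, match=msg):
    index = pd.period_range(freq="A-DEC", start="1/1/2001", end="12/1/2009")

expected = pd.period_range(freq="Y-DEC", start="1/1/2001", end="12/1/2009")
tm.assert_index_equal(index, expected)
```
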
diff --git a/pandas/tests/indexes/period/test_period_range.py b/pandas/tests/indexes/period/test_period_range.py
index 63acaba2d4f3e..bee8a1282d08b 100644
--- a/pandas/tests/indexes/period/test_period_range.py
+++ b/pandas/tests/indexes/period/test_period_range.py
@@ -20,7 +20,7 @@ def test_required_arguments(self):
         with pytest.raises(ValueError, match=msg):
             period_range("2011-1-1", "2012-1-1", "B")
 
-    @pytest.mark.parametrize("freq", ["D", "W", "Q", "A"])
+    @pytest.mark.parametrize("freq", ["D", "W", "Q", "Y"])
     def test_construction_from_string(self, freq):
         # non-empty
         expected = date_range(
diff --git a/pandas/tests/indexes/period/test_pickle.py b/pandas/tests/indexes/period/test_pickle.py
index cb981ab10064f..7d359fdabb6f1 100644
--- a/pandas/tests/indexes/period/test_pickle.py
+++ b/pandas/tests/indexes/period/test_pickle.py
@@ -12,7 +12,7 @@
 
 class TestPickle:
-    @pytest.mark.parametrize("freq", ["D", "M", "A"])
+    @pytest.mark.parametrize("freq", ["D", "M", "Y"])
     def test_pickle_round_trip(self, freq):
         idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.nan], freq=freq)
         result = tm.round_trip_pickle(idx)
diff --git a/pandas/tests/indexes/period/test_resolution.py b/pandas/tests/indexes/period/test_resolution.py
index 6c876b4f9366f..98ccfe6569798 100644
--- a/pandas/tests/indexes/period/test_resolution.py
+++ b/pandas/tests/indexes/period/test_resolution.py
@@ -7,7 +7,7 @@ class TestResolution:
     @pytest.mark.parametrize(
         "freq,expected",
         [
-            ("A", "year"),
+            ("Y", "year"),
             ("Q", "quarter"),
             ("M", "month"),
             ("D", "day"),
diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py
index dd05210e417b0..9610db5f0336b 100644
--- a/pandas/tests/indexes/period/test_setops.py
+++ b/pandas/tests/indexes/period/test_setops.py
@@ -81,8 +81,8 @@ def test_union(self, sort):
         other6 = period_range("2000-04-01", freq="M", periods=7)
         expected6 = period_range("2000-01-01", freq="M", periods=10)
 
-        rng7 = period_range("2003-01-01", freq="A", periods=5)
-        other7 = period_range("1998-01-01", freq="A", periods=8)
+        rng7 = period_range("2003-01-01", freq="Y", periods=5)
+        other7 = period_range("1998-01-01", freq="Y", periods=8)
         expected7 = PeriodIndex(
             [
                 "2003",
@@ -96,7 +96,7 @@ def test_union(self, sort):
                 "2001",
                 "2002",
             ],
-            freq="A",
+            freq="Y",
         )
 
         rng8 = PeriodIndex(
@@ -293,9 +293,9 @@ def test_difference(self, sort):
         expected6 = PeriodIndex(["2000-02-01", "2000-01-01", "2000-03-01"], freq="M")
 
         period_rng = ["2003", "2007", "2006", "2005", "2004"]
-        rng7 = PeriodIndex(period_rng, freq="A")
-        other7 = period_range("1998-01-01", freq="A", periods=8)
-        expected7 = PeriodIndex(["2007", "2006"], freq="A")
+        rng7 = PeriodIndex(period_rng, freq="Y")
+        other7 = period_range("1998-01-01", freq="Y", periods=8)
+        expected7 = PeriodIndex(["2007", "2006"], freq="Y")
 
         for rng, other, expected in [
             (rng1, other1, expected1),
diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py
index 18668fd357fd8..2a9149844a353 100644
--- a/pandas/tests/indexes/period/test_tools.py
+++ b/pandas/tests/indexes/period/test_tools.py
@@ -27,7 +27,7 @@ class TestPeriodRepresentation:
         ("us", "1970-01-01"),
         ("ns", "1970-01-01"),
         ("M", "1970-01"),
-        ("A", 1970),
+        ("Y", 1970),
     ],
 )
 @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
@@ -43,7 +43,7 @@ def test_freq(self, freq, base_date):
 
 class TestPeriodIndexConversion:
     def test_tolist(self):
-        index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
+        index = period_range(freq="Y", start="1/1/2001", end="12/1/2009")
         rs = index.tolist()
         for x in rs:
             assert isinstance(x, Period)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index bc04c1c6612f4..632d52c3f68cd 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -980,7 +980,7 @@ def test_str_attribute(self, method):
             Index(range(5)),
             tm.makeDateIndex(10),
             MultiIndex.from_tuples([("foo", "1"), ("bar", "3")]),
-            period_range(start="2000", end="2010", freq="A"),
+            period_range(start="2000", end="2010", freq="Y"),
         ],
     )
     def test_str_attribute_raises(self, index):
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
index d0593b3230959..03531547ef042 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
@@ -3,6 +3,7 @@
 
 from pandas import (
     Timedelta,
+    TimedeltaIndex,
     timedelta_range,
     to_timedelta,
 )
@@ -119,3 +120,42 @@ def test_timedelta_range_infer_freq(self):
         # https://github.com/pandas-dev/pandas/issues/35897
         result = timedelta_range("0s", "1s", periods=31)
         assert result.freq is None
+
+    @pytest.mark.parametrize(
+        "freq_depr, start, end, expected_values, expected_freq",
+        [
+            (
+                "3.5S",
+                "05:03:01",
+                "05:03:10",
+                ["0 days 05:03:01", "0 days 05:03:04.500000", "0 days 05:03:08"],
+                "3500ms",
+            ),
+            (
+                "2.5T",
+                "5 hours",
+                "5 hours 8 minutes",
+                [
+                    "0 days 05:00:00",
+                    "0 days 05:02:30",
+                    "0 days 05:05:00",
+                    "0 days 05:07:30",
+                ],
+                "150s",
+            ),
+        ],
+    )
+    def test_timedelta_range_deprecated_freq(
+        self, freq_depr, start, end, expected_values, expected_freq
+    ):
+        # GH#52536
+        msg = (
+            f"'{freq_depr[-1]}' is deprecated and will be removed in a future version."
+        )
+
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = timedelta_range(start=start, end=end, freq=freq_depr)
+        expected = TimedeltaIndex(
+            expected_values, dtype="timedelta64[ns]", freq=expected_freq
+        )
+        tm.assert_index_equal(result, expected)
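
Illustration, not part of the patch: the timedelta case added above. A fractional minute step spelled with the deprecated "T" alias resolves to the same offset as its second-based spelling:

```python
import pandas as pd
import pandas._testing as tm  # internal helper, used here only to mirror the test

# "2.5T" (deprecated "T" = minutes) is the same 150-second step as "150s".
with tm.assert_produces_warning(FutureWarning, match="'T' is deprecated"):
    result = pd.timedelta_range(start="5 hours", end="5 hours 8 minutes", freq="2.5T")
expected = pd.timedelta_range(start="5 hours", end="5 hours 8 minutes", freq="150s")
tm.assert_index_equal(result, expected)
```
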
"freq": "A-DEC"} + expected = {"name": "values", "type": "datetime", "freq": "Y-DEC"} assert result == expected @pytest.mark.parametrize("kind", [pd.Categorical, pd.CategoricalIndex]) diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 220741cf1ec3d..f488ee7da87ac 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -102,7 +102,7 @@ def test_is_error_nozeroindex(self): _check_plot_works(a.plot, yerr=a) def test_nonnumeric_exclude(self): - idx = date_range("1/1/1987", freq="A", periods=3) + idx = date_range("1/1/1987", freq="Y", periods=3) df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}, idx) fig, ax = mpl.pyplot.subplots() @@ -111,13 +111,13 @@ def test_nonnumeric_exclude(self): mpl.pyplot.close(fig) def test_nonnumeric_exclude_error(self): - idx = date_range("1/1/1987", freq="A", periods=3) + idx = date_range("1/1/1987", freq="Y", periods=3) df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}, idx) msg = "no numeric data to plot" with pytest.raises(TypeError, match=msg): df["A"].plot() - @pytest.mark.parametrize("freq", ["s", "min", "H", "D", "W", "M", "Q", "A"]) + @pytest.mark.parametrize("freq", ["s", "min", "H", "D", "W", "M", "Q", "Y"]) def test_tsplot_period(self, freq): idx = period_range("12/31/1999", freq=freq, periods=100) ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) @@ -125,7 +125,7 @@ def test_tsplot_period(self, freq): _check_plot_works(ser.plot, ax=ax) @pytest.mark.parametrize( - "freq", ["s", "min", "H", "D", "W", "ME", "Q-DEC", "A", "1B30Min"] + "freq", ["s", "min", "H", "D", "W", "ME", "Q-DEC", "Y", "1B30Min"] ) def test_tsplot_datetime(self, freq): idx = date_range("12/31/1999", freq=freq, periods=100) @@ -165,8 +165,8 @@ def test_get_datevalue(self): from pandas.plotting._matplotlib.converter import get_datevalue assert get_datevalue(None, "D") is None - assert get_datevalue(1987, "A") == 1987 - assert get_datevalue(Period(1987, "A"), "M") == Period("1987-12", "M").ordinal + assert get_datevalue(1987, "Y") == 1987 + assert get_datevalue(Period(1987, "Y"), "M") == Period("1987-12", "M").ordinal assert get_datevalue("1/1/1987", "D") == Period("1987-1-1", "D").ordinal def test_ts_plot_format_coord(self): @@ -176,7 +176,7 @@ def check_format_of_first_point(ax, expected_string): first_y = first_line.get_ydata()[0] assert expected_string == ax.format_coord(first_x, first_y) - annual = Series(1, index=date_range("2014-01-01", periods=3, freq="A-DEC")) + annual = Series(1, index=date_range("2014-01-01", periods=3, freq="Y-DEC")) _, ax = mpl.pyplot.subplots() annual.plot(ax=ax) check_format_of_first_point(ax, "t = 2014 y = 1.000000") @@ -187,14 +187,14 @@ def check_format_of_first_point(ax, expected_string): daily.plot(ax=ax) check_format_of_first_point(ax, "t = 2014-01-01 y = 1.000000") - @pytest.mark.parametrize("freq", ["s", "min", "H", "D", "W", "M", "Q", "A"]) + @pytest.mark.parametrize("freq", ["s", "min", "H", "D", "W", "M", "Q", "Y"]) def test_line_plot_period_series(self, freq): idx = period_range("12/31/1999", freq=freq, periods=100) ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) _check_plot_works(ser.plot, ser.index.freq) @pytest.mark.parametrize( - "frqncy", ["1s", "3s", "5min", "7H", "4D", "8W", "11M", "3A"] + "frqncy", ["1s", "3s", "5min", "7H", "4D", "8W", "11M", "3Y"] ) def test_line_plot_period_mlt_series(self, frqncy): # test period index line plot for series with multiples (`mlt`) of the @@ -204,14 
+204,14 @@ def test_line_plot_period_mlt_series(self, frqncy): _check_plot_works(s.plot, s.index.freq.rule_code) @pytest.mark.parametrize( - "freq", ["s", "min", "H", "D", "W", "ME", "Q-DEC", "A", "1B30Min"] + "freq", ["s", "min", "H", "D", "W", "ME", "Q-DEC", "Y", "1B30Min"] ) def test_line_plot_datetime_series(self, freq): idx = date_range("12/31/1999", freq=freq, periods=100) ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) _check_plot_works(ser.plot, ser.index.freq.rule_code) - @pytest.mark.parametrize("freq", ["s", "min", "H", "D", "W", "ME", "Q", "A"]) + @pytest.mark.parametrize("freq", ["s", "min", "H", "D", "W", "ME", "Q", "Y"]) def test_line_plot_period_frame(self, freq): idx = date_range("12/31/1999", freq=freq, periods=100) df = DataFrame( @@ -222,7 +222,7 @@ def test_line_plot_period_frame(self, freq): _check_plot_works(df.plot, df.index.freq) @pytest.mark.parametrize( - "frqncy", ["1s", "3s", "5min", "7H", "4D", "8W", "11M", "3A"] + "frqncy", ["1s", "3s", "5min", "7H", "4D", "8W", "11M", "3Y"] ) def test_line_plot_period_mlt_frame(self, frqncy): # test period index line plot for DataFrames with multiples (`mlt`) @@ -240,7 +240,7 @@ def test_line_plot_period_mlt_frame(self, frqncy): @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") @pytest.mark.parametrize( - "freq", ["s", "min", "H", "D", "W", "ME", "Q-DEC", "A", "1B30Min"] + "freq", ["s", "min", "H", "D", "W", "ME", "Q-DEC", "Y", "1B30Min"] ) def test_line_plot_datetime_frame(self, freq): idx = date_range("12/31/1999", freq=freq, periods=100) @@ -254,7 +254,7 @@ def test_line_plot_datetime_frame(self, freq): _check_plot_works(df.plot, freq) @pytest.mark.parametrize( - "freq", ["s", "min", "H", "D", "W", "ME", "Q-DEC", "A", "1B30Min"] + "freq", ["s", "min", "H", "D", "W", "ME", "Q-DEC", "Y", "1B30Min"] ) def test_line_plot_inferred_freq(self, freq): idx = date_range("12/31/1999", freq=freq, periods=100) @@ -440,7 +440,7 @@ def test_get_finder(self): assert conv.get_finder(to_offset("D")) == conv._daily_finder assert conv.get_finder(to_offset("ME")) == conv._monthly_finder assert conv.get_finder(to_offset("Q")) == conv._quarterly_finder - assert conv.get_finder(to_offset("A")) == conv._annual_finder + assert conv.get_finder(to_offset("Y")) == conv._annual_finder assert conv.get_finder(to_offset("W")) == conv._daily_finder def test_finder_daily(self): @@ -523,10 +523,10 @@ def test_finder_monthly_long(self): def test_finder_annual(self): xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170] - xp = [Period(x, freq="A").ordinal for x in xp] + xp = [Period(x, freq="Y").ordinal for x in xp] rs = [] for nyears in [5, 10, 19, 49, 99, 199, 599, 1001]: - rng = period_range("1987", periods=nyears, freq="A") + rng = period_range("1987", periods=nyears, freq="Y") ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) _, ax = mpl.pyplot.subplots() ser.plot(ax=ax) diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index ae28a010d8435..ad6ff70b14d25 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -96,7 +96,7 @@ def test_raises_on_non_datetimelike_index(): "but got an instance of 'RangeIndex'" ) with pytest.raises(TypeError, match=msg): - xp.resample("A") + xp.resample("Y") @all_ts diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 113e2d8986ad2..b40ae7d2bd8c0 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ 
b/pandas/tests/resample/test_datetime_index.py @@ -435,7 +435,7 @@ def test_resample_frame_basic_cy_funcs(f, unit): g._cython_agg_general(f, alt=None, numeric_only=True) -@pytest.mark.parametrize("freq", ["A", "ME"]) +@pytest.mark.parametrize("freq", ["Y", "ME"]) def test_resample_frame_basic_M_A(freq, unit): df = tm.makeTimeDataFrame() df.index = df.index.as_unit(unit) @@ -668,8 +668,8 @@ def test_resample_reresample(unit): @pytest.mark.parametrize( "freq, expected_kwargs", [ - ["A-DEC", {"start": "1990", "end": "2000", "freq": "a-dec"}], - ["A-JUN", {"start": "1990", "end": "2000", "freq": "a-jun"}], + ["Y-DEC", {"start": "1990", "end": "2000", "freq": "y-dec"}], + ["Y-JUN", {"start": "1990", "end": "2000", "freq": "y-jun"}], ["ME", {"start": "1990-01", "end": "2000-01", "freq": "M"}], ], ) @@ -1246,7 +1246,7 @@ def test_corner_cases_period(simple_period_range_series): # miscellaneous test coverage len0pts = simple_period_range_series("2007-01", "2010-05", freq="M")[:0] # it works - result = len0pts.resample("A-DEC").mean() + result = len0pts.resample("Y-DEC").mean() assert len(result) == 0 @@ -2021,7 +2021,7 @@ def test_long_rule_non_nano(): "1900-12-31", ] ).astype("datetime64[s]"), - freq="200A-DEC", + freq="200Y-DEC", ) expected = Series([1.0, 3.0, 6.5, 4.0, 3.0, 6.5, 4.0, 3.0, 6.5], index=expected_idx) tm.assert_series_equal(result, expected) @@ -2044,7 +2044,7 @@ def test_resample_empty_series_with_tz(): def test_resample_M_deprecated(): - depr_msg = r"\'M\' will be deprecated, please use \'ME\' for \'month end\'" + depr_msg = "'M' will be deprecated, please use 'ME' instead." s = Series(range(10), index=date_range("20130101", freq="d", periods=10)) expected = s.resample("2ME").mean() diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py index db3804a6600b9..f3e2eecf63d6b 100644 --- a/pandas/tests/resample/test_period_index.py +++ b/pandas/tests/resample/test_period_index.py @@ -109,7 +109,7 @@ def test_selection(self, index, freq, kind, kwargs): def test_annual_upsample_cases( self, offset, period, conv, meth, month, simple_period_range_series ): - ts = simple_period_range_series("1/1/1990", "12/31/1991", freq=f"A-{month}") + ts = simple_period_range_series("1/1/1990", "12/31/1991", freq=f"Y-{month}") warn = FutureWarning if period == "B" else None msg = r"PeriodDtype\[B\] is deprecated" with tm.assert_produces_warning(warn, match=msg): @@ -120,20 +120,20 @@ def test_annual_upsample_cases( def test_basic_downsample(self, simple_period_range_series): ts = simple_period_range_series("1/1/1990", "6/30/1995", freq="M") - result = ts.resample("a-dec").mean() + result = ts.resample("y-dec").mean() expected = ts.groupby(ts.index.year).mean() - expected.index = period_range("1/1/1990", "6/30/1995", freq="a-dec") + expected.index = period_range("1/1/1990", "6/30/1995", freq="y-dec") tm.assert_series_equal(result, expected) # this is ok - tm.assert_series_equal(ts.resample("a-dec").mean(), result) - tm.assert_series_equal(ts.resample("a").mean(), result) + tm.assert_series_equal(ts.resample("y-dec").mean(), result) + tm.assert_series_equal(ts.resample("y").mean(), result) @pytest.mark.parametrize( "rule,expected_error_msg", [ - ("a-dec", "<YearEnd: month=12>"), + ("y-dec", "<YearEnd: month=12>"), ("q-mar", "<QuarterEnd: startingMonth=3>"), ("M", "<MonthEnd>"), ("w-thu", "<Week: weekday=3>"), @@ -152,7 +152,7 @@ def test_not_subperiod(self, simple_period_range_series, rule, expected_error_ms @pytest.mark.parametrize("freq", ["D", "2D"]) def 
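
Illustration, not part of the patch: the annual resample that "Y" now spells, mirroring `test_all_values_single_bin`-style grouping on a datetime index. This is a sketch; the index values are my own:

```python
import numpy as np
import pandas as pd

# "Y" is the year-end alias that replaces "A": both bucket by calendar year.
idx = pd.date_range("2000-01-31", periods=10, freq="ME")
ser = pd.Series(np.arange(10.0), index=idx)

# All ten month-end points fall in 2000, so the single annual bin
# is just the overall mean.
result = ser.resample("Y").mean()
assert result.iloc[0] == ser.mean()
```
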
diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py
index db3804a6600b9..f3e2eecf63d6b 100644
--- a/pandas/tests/resample/test_period_index.py
+++ b/pandas/tests/resample/test_period_index.py
@@ -109,7 +109,7 @@ def test_selection(self, index, freq, kind, kwargs):
     def test_annual_upsample_cases(
         self, offset, period, conv, meth, month, simple_period_range_series
     ):
-        ts = simple_period_range_series("1/1/1990", "12/31/1991", freq=f"A-{month}")
+        ts = simple_period_range_series("1/1/1990", "12/31/1991", freq=f"Y-{month}")
         warn = FutureWarning if period == "B" else None
         msg = r"PeriodDtype\[B\] is deprecated"
         with tm.assert_produces_warning(warn, match=msg):
@@ -120,20 +120,20 @@ def test_annual_upsample_cases(
 
     def test_basic_downsample(self, simple_period_range_series):
         ts = simple_period_range_series("1/1/1990", "6/30/1995", freq="M")
-        result = ts.resample("a-dec").mean()
+        result = ts.resample("y-dec").mean()
 
         expected = ts.groupby(ts.index.year).mean()
-        expected.index = period_range("1/1/1990", "6/30/1995", freq="a-dec")
+        expected.index = period_range("1/1/1990", "6/30/1995", freq="y-dec")
         tm.assert_series_equal(result, expected)
 
         # this is ok
-        tm.assert_series_equal(ts.resample("a-dec").mean(), result)
-        tm.assert_series_equal(ts.resample("a").mean(), result)
+        tm.assert_series_equal(ts.resample("y-dec").mean(), result)
+        tm.assert_series_equal(ts.resample("y").mean(), result)
 
     @pytest.mark.parametrize(
         "rule,expected_error_msg",
         [
-            ("a-dec", "<YearEnd: month=12>"),
+            ("y-dec", "<YearEnd: month=12>"),
             ("q-mar", "<QuarterEnd: startingMonth=3>"),
             ("M", "<MonthEnd>"),
             ("w-thu", "<Week: weekday=3>"),
@@ -152,7 +152,7 @@ def test_not_subperiod(self, simple_period_range_series, rule, expected_error_ms
     @pytest.mark.parametrize("freq", ["D", "2D"])
     def test_basic_upsample(self, freq, simple_period_range_series):
         ts = simple_period_range_series("1/1/1990", "6/30/1995", freq="M")
-        result = ts.resample("a-dec").mean()
+        result = ts.resample("y-dec").mean()
 
         resampled = result.resample(freq, convention="end").ffill()
         expected = result.to_timestamp(freq, how="end")
@@ -160,7 +160,7 @@ def test_basic_upsample(self, freq, simple_period_range_series):
         tm.assert_series_equal(resampled, expected)
 
     def test_upsample_with_limit(self):
-        rng = period_range("1/1/2000", periods=5, freq="A")
+        rng = period_range("1/1/2000", periods=5, freq="Y")
         ts = Series(np.random.default_rng(2).standard_normal(len(rng)), rng)
 
         result = ts.resample("M", convention="end").ffill(limit=2)
@@ -168,13 +168,13 @@ def test_upsample_with_limit(self):
         tm.assert_series_equal(result, expected)
 
     def test_annual_upsample(self, simple_period_range_series):
-        ts = simple_period_range_series("1/1/1990", "12/31/1995", freq="A-DEC")
+        ts = simple_period_range_series("1/1/1990", "12/31/1995", freq="Y-DEC")
         df = DataFrame({"a": ts})
         rdf = df.resample("D").ffill()
         exp = df["a"].resample("D").ffill()
         tm.assert_series_equal(rdf["a"], exp)
 
-        rng = period_range("2000", "2003", freq="A-DEC")
+        rng = period_range("2000", "2003", freq="Y-DEC")
         ts = Series([1, 2, 3, 4], index=rng)
 
         result = ts.resample("M").ffill()
@@ -391,13 +391,13 @@ def test_weekly_upsample(self, day, target, convention, simple_period_range_seri
     def test_resample_to_timestamps(self, simple_period_range_series):
         ts = simple_period_range_series("1/1/1990", "12/31/1995", freq="M")
 
-        result = ts.resample("A-DEC", kind="timestamp").mean()
-        expected = ts.to_timestamp(how="start").resample("A-DEC").mean()
+        result = ts.resample("Y-DEC", kind="timestamp").mean()
+        expected = ts.to_timestamp(how="start").resample("Y-DEC").mean()
         tm.assert_series_equal(result, expected)
 
     @pytest.mark.parametrize("month", MONTHS)
     def test_resample_to_quarterly(self, simple_period_range_series, month):
-        ts = simple_period_range_series("1990", "1992", freq=f"A-{month}")
+        ts = simple_period_range_series("1990", "1992", freq=f"Y-{month}")
         quar_ts = ts.resample(f"Q-{month}").ffill()
 
         stamps = ts.to_timestamp("D", how="start")
@@ -415,7 +415,7 @@ def test_resample_to_quarterly(self, simple_period_range_series, month):
     @pytest.mark.parametrize("how", ["start", "end"])
     def test_resample_to_quarterly_start_end(self, simple_period_range_series, how):
         # conforms, but different month
-        ts = simple_period_range_series("1990", "1992", freq="A-JUN")
+        ts = simple_period_range_series("1990", "1992", freq="Y-JUN")
 
         result = ts.resample("Q-MAR", convention=how).ffill()
         expected = ts.asfreq("Q-MAR", how=how)
         expected = expected.reindex(result.index, method="ffill")
@@ -426,21 +426,21 @@ def test_resample_to_quarterly_start_end(self, simple_period_range_series, how):
         tm.assert_series_equal(result, expected)
 
     def test_resample_fill_missing(self):
-        rng = PeriodIndex([2000, 2005, 2007, 2009], freq="A")
+        rng = PeriodIndex([2000, 2005, 2007, 2009], freq="Y")
 
         s = Series(np.random.default_rng(2).standard_normal(4), index=rng)
 
         stamps = s.to_timestamp()
-        filled = s.resample("A").ffill()
-        expected = stamps.resample("A").ffill().to_period("A")
+        filled = s.resample("Y").ffill()
+        expected = stamps.resample("Y").ffill().to_period("Y")
         tm.assert_series_equal(filled, expected)
 
     def test_cant_fill_missing_dups(self):
-        rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq="A")
+        rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq="Y")
         s = Series(np.random.default_rng(2).standard_normal(5), index=rng)
         msg = "Reindexing only valid with uniquely valued Index objects"
         with pytest.raises(InvalidIndexError, match=msg):
-            s.resample("A").ffill()
+            s.resample("Y").ffill()
 
     @pytest.mark.parametrize("freq", ["5min"])
     @pytest.mark.parametrize("kind", ["period", None, "timestamp"])
@@ -537,13 +537,13 @@ def test_resample_tz_localized(self):
         ts["second"] = np.cumsum(np.random.default_rng(2).standard_normal(len(rng)))
         expected = DataFrame(
             {
-                "first": ts.resample("A").sum()["first"],
-                "second": ts.resample("A").mean()["second"],
+                "first": ts.resample("Y").sum()["first"],
+                "second": ts.resample("Y").mean()["second"],
             },
             columns=["first", "second"],
         )
         result = (
-            ts.resample("A")
+            ts.resample("Y")
             .agg({"first": "sum", "second": "mean"})
             .reindex(columns=["first", "second"])
         )
@@ -573,8 +573,8 @@ def test_quarterly_resampling(self):
         rng = period_range("2000Q1", periods=10, freq="Q-DEC")
         ts = Series(np.arange(10), index=rng)
 
-        result = ts.resample("A").mean()
-        exp = ts.to_timestamp().resample("A").mean().to_period()
+        result = ts.resample("Y").mean()
+        exp = ts.to_timestamp().resample("Y").mean().to_period()
         tm.assert_series_equal(result, exp)
 
     def test_resample_weekly_bug_1726(self):
@@ -647,7 +647,7 @@ def test_monthly_convention_span(self):
         tm.assert_series_equal(result, expected)
 
     @pytest.mark.parametrize(
-        "from_freq, to_freq", [("D", "ME"), ("Q", "A"), ("ME", "Q"), ("D", "W")]
+        "from_freq, to_freq", [("D", "ME"), ("Q", "Y"), ("ME", "Q"), ("D", "W")]
     )
     def test_default_right_closed_label(self, from_freq, to_freq):
         idx = date_range(start="8/15/2012", periods=100, freq=from_freq)
@@ -676,7 +676,7 @@ def test_all_values_single_bin(self):
         index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
         s = Series(np.random.default_rng(2).standard_normal(len(index)), index=index)
 
-        result = s.resample("A").mean()
+        result = s.resample("Y").mean()
         tm.assert_almost_equal(result.iloc[0], s.mean())
 
     def test_evenly_divisible_with_no_extra_bins(self):
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index 3e0922228cb74..c00366b2e28ce 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -24,7 +24,7 @@ def test_series():
 
 def test_apply(test_series):
-    grouper = Grouper(freq="A", label="right", closed="right")
+    grouper = Grouper(freq="Y", label="right", closed="right")
 
     grouped = test_series.groupby(grouper)
 
@@ -44,18 +44,18 @@ def test_count(test_series):
 
     expected = test_series.groupby(lambda x: x.year).count()
 
-    grouper = Grouper(freq="A", label="right", closed="right")
+    grouper = Grouper(freq="Y", label="right", closed="right")
    result = test_series.groupby(grouper).count()
     expected.index = result.index
     tm.assert_series_equal(result, expected)
 
-    result = test_series.resample("A").count()
+    result = test_series.resample("Y").count()
     expected.index = result.index
     tm.assert_series_equal(result, expected)
 
 
 def test_numpy_reduction(test_series):
-    result = test_series.resample("A", closed="right").prod()
+    result = test_series.resample("Y", closed="right").prod()
 
     msg = "using SeriesGroupBy.prod"
     with tm.assert_produces_warning(FutureWarning, match=msg):
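
Illustration, not part of the patch: the period-index equivalence asserted by `test_quarterly_resampling` above, written with the new "Y" spelling. Annual resampling of a `PeriodIndex` matches the timestamp round-trip:

```python
import numpy as np
import pandas as pd

rng = pd.period_range("2000Q1", periods=10, freq="Q-DEC")
ts = pd.Series(np.arange(10), index=rng)

# Resample quarterly periods to annual periods with "Y" (formerly "A").
result = ts.resample("Y").mean()
exp = ts.to_timestamp().resample("Y").mean().to_period()
pd.testing.assert_series_equal(result, exp)
```
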
diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py
index 5dde863f246d1..aa1a74aadae12 100644
--- a/pandas/tests/reshape/concat/test_concat.py
+++ b/pandas/tests/reshape/concat/test_concat.py
@@ -30,8 +30,8 @@ class TestConcatenate:
     def test_append_concat(self):
         # GH#1815
-        d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
-        d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
+        d1 = date_range("12/31/1990", "12/31/1999", freq="Y-DEC")
+        d2 = date_range("12/31/2000", "12/31/2009", freq="Y-DEC")
 
         s1 = Series(np.random.default_rng(2).standard_normal(10), d1)
         s2 = Series(np.random.default_rng(2).standard_normal(10), d2)
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 8435f4a189c56..2d41b6d355ead 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -445,10 +445,10 @@ def test_pivot_no_values(self):
         tm.assert_frame_equal(res, exp)
 
         res = df.pivot_table(
-            index=Grouper(freq="A"), columns=Grouper(key="dt", freq="ME")
+            index=Grouper(freq="Y"), columns=Grouper(key="dt", freq="ME")
         )
         exp = DataFrame(
-            [3.0], index=pd.DatetimeIndex(["2011-12-31"], freq="A"), columns=exp_columns
+            [3.0], index=pd.DatetimeIndex(["2011-12-31"], freq="Y"), columns=exp_columns
         )
         tm.assert_frame_equal(res, exp)
 
@@ -1273,7 +1273,7 @@ def test_pivot_timegrouper(self, using_array_manager):
 
         expected = DataFrame(
             np.array([10, 18, 3], dtype="int64").reshape(1, 3),
-            index=pd.DatetimeIndex([datetime(2013, 12, 31)], freq="A"),
+            index=pd.DatetimeIndex([datetime(2013, 12, 31)], freq="Y"),
             columns="Carl Joe Mark".split(),
         )
         expected.index.name = "Date"
@@ -1281,7 +1281,7 @@ def test_pivot_timegrouper(self, using_array_manager):
 
         result = pivot_table(
             df,
-            index=Grouper(freq="A"),
+            index=Grouper(freq="Y"),
             columns="Buyer",
             values="Quantity",
             aggfunc="sum",
@@ -1291,7 +1291,7 @@ def test_pivot_timegrouper(self, using_array_manager):
         result = pivot_table(
             df,
             index="Buyer",
-            columns=Grouper(freq="A"),
+            columns=Grouper(freq="Y"),
             values="Quantity",
             aggfunc="sum",
         )
diff --git a/pandas/tests/scalar/period/test_asfreq.py b/pandas/tests/scalar/period/test_asfreq.py
index 00285148a3c90..4287a69823aef 100644
--- a/pandas/tests/scalar/period/test_asfreq.py
+++ b/pandas/tests/scalar/period/test_asfreq.py
@@ -18,7 +18,7 @@ class TestFreqConversion:
     """Test frequency conversion of date objects"""
 
     @pytest.mark.filterwarnings("ignore:Period with BDay:FutureWarning")
-    @pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
+    @pytest.mark.parametrize("freq", ["Y", "Q", "M", "W", "B", "D"])
     def test_asfreq_near_zero(self, freq):
         # GH#19643, GH#19650
         per = Period("0001-01-01", freq=freq)
@@ -49,7 +49,7 @@ def test_to_timestamp_out_of_bounds(self):
             per.to_timestamp()
 
     def test_asfreq_corner(self):
-        val = Period(freq="A", year=2007)
+        val = Period(freq="Y", year=2007)
         result1 = val.asfreq("5min")
         result2 = val.asfreq("min")
         expected = Period("2007-12-31 23:59", freq="min")
@@ -61,11 +61,11 @@ def test_conv_annual(self):
         # frequency conversion tests: from Annual Frequency
 
-        ival_A = Period(freq="A", year=2007)
+        ival_A = Period(freq="Y", year=2007)
 
-        ival_AJAN = Period(freq="A-JAN", year=2007)
-        ival_AJUN = Period(freq="A-JUN", year=2007)
-        ival_ANOV = Period(freq="A-NOV", year=2007)
+        ival_AJAN = Period(freq="Y-JAN", year=2007)
+        ival_AJUN = Period(freq="Y-JUN", year=2007)
+        ival_ANOV = Period(freq="Y-NOV", year=2007)
 
         ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
         ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
@@ -133,7 +133,7 @@ def test_conv_annual(self):
         assert ival_ANOV.asfreq("D", "s") == ival_ANOV_to_D_start
         assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
 
-        assert ival_A.asfreq("A") == ival_A
+        assert ival_A.asfreq("Y") == ival_A
 
     def test_conv_quarterly(self):
         # frequency conversion tests: from Quarterly Frequency
@@ -144,7 +144,7 @@ def test_conv_quarterly(self):
         ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
         ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
 
-        ival_Q_to_A = Period(freq="A", year=2007)
+        ival_Q_to_A = Period(freq="Y", year=2007)
         ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
         ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
 
         ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
@@ -175,8 +175,8 @@ def test_conv_quarterly(self):
         ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
         ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
 
-        assert ival_Q.asfreq("A") == ival_Q_to_A
-        assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
+        assert ival_Q.asfreq("Y") == ival_Q_to_A
+        assert ival_Q_end_of_year.asfreq("Y") == ival_Q_to_A
 
         assert ival_Q.asfreq("M", "s") == ival_Q_to_M_start
         assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
@@ -207,7 +207,7 @@ def test_conv_monthly(self):
         ival_M = Period(freq="M", year=2007, month=1)
         ival_M_end_of_year = Period(freq="M", year=2007, month=12)
         ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
-        ival_M_to_A = Period(freq="A", year=2007)
+        ival_M_to_A = Period(freq="Y", year=2007)
         ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
         ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
         ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
@@ -231,8 +231,8 @@ def test_conv_monthly(self):
             freq="s", year=2007, month=1, day=31, hour=23, minute=59, second=59
         )
 
-        assert ival_M.asfreq("A") == ival_M_to_A
-        assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
+        assert ival_M.asfreq("Y") == ival_M_to_A
+        assert ival_M_end_of_year.asfreq("Y") == ival_M_to_A
 
         assert ival_M.asfreq("Q") == ival_M_to_Q
         assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
@@ -282,14 +282,14 @@ def test_conv_weekly(self):
         ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
         ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
         ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
-        ival_W_to_A = Period(freq="A", year=2007)
+        ival_W_to_A = Period(freq="Y", year=2007)
         ival_W_to_Q = Period(freq="Q", year=2007, quarter=1)
         ival_W_to_M = Period(freq="M", year=2007, month=1)
 
         if Period(freq="D", year=2007, month=12, day=31).weekday == 6:
-            ival_W_to_A_end_of_year = Period(freq="A", year=2007)
+            ival_W_to_A_end_of_year = Period(freq="Y", year=2007)
         else:
-            ival_W_to_A_end_of_year = Period(freq="A", year=2008)
+            ival_W_to_A_end_of_year = Period(freq="Y", year=2008)
 
         if Period(freq="D", year=2007, month=3, day=31).weekday == 6:
             ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1)
@@ -321,8 +321,8 @@ def test_conv_weekly(self):
             freq="s", year=2007, month=1, day=7, hour=23, minute=59, second=59
         )
 
-        assert ival_W.asfreq("A") == ival_W_to_A
-        assert ival_W_end_of_year.asfreq("A") == ival_W_to_A_end_of_year
+        assert ival_W.asfreq("Y") == ival_W_to_A
+        assert ival_W_end_of_year.asfreq("Y") == ival_W_to_A_end_of_year
 
         assert ival_W.asfreq("Q") == ival_W_to_Q
         assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter
@@ -394,7 +394,7 @@ def test_conv_business(self):
         ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31)
         ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5)
 
-        ival_B_to_A = Period(freq="A", year=2007)
+        ival_B_to_A = Period(freq="Y", year=2007)
         ival_B_to_Q = Period(freq="Q", year=2007, quarter=1)
         ival_B_to_M = Period(freq="M", year=2007, month=1)
         ival_B_to_W = Period(freq="W", year=2007, month=1, day=7)
@@ -414,8 +414,8 @@ def test_conv_business(self):
             freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59
         )
 
-        assert ival_B.asfreq("A") == ival_B_to_A
-        assert ival_B_end_of_year.asfreq("A") == ival_B_to_A
+        assert ival_B.asfreq("Y") == ival_B_to_A
+        assert ival_B_end_of_year.asfreq("Y") == ival_B_to_A
 
         assert ival_B.asfreq("Q") == ival_B_to_Q
         assert ival_B_end_of_quarter.asfreq("Q") == ival_B_to_Q
 
         assert ival_B.asfreq("M") == ival_B_to_M
@@ -452,11 +452,11 @@ def test_conv_daily(self):
         ival_B_friday = Period(freq="B", year=2007, month=1, day=5)
         ival_B_monday = Period(freq="B", year=2007, month=1, day=8)
 
-        ival_D_to_A = Period(freq="A", year=2007)
+        ival_D_to_A = Period(freq="Y", year=2007)
 
-        ival_Deoq_to_AJAN = Period(freq="A-JAN", year=2008)
-        ival_Deoq_to_AJUN = Period(freq="A-JUN", year=2007)
-        ival_Deoq_to_ADEC = Period(freq="A-DEC", year=2007)
+        ival_Deoq_to_AJAN = Period(freq="Y-JAN", year=2008)
+        ival_Deoq_to_AJUN = Period(freq="Y-JUN", year=2007)
+        ival_Deoq_to_ADEC = Period(freq="Y-DEC", year=2007)
 
         ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
         ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
@@ -480,13 +480,13 @@ def test_conv_daily(self):
             freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59
         )
 
-        assert ival_D.asfreq("A") == ival_D_to_A
+        assert ival_D.asfreq("Y") == ival_D_to_A
 
-        assert ival_D_end_of_quarter.asfreq("A-JAN") == ival_Deoq_to_AJAN
-        assert ival_D_end_of_quarter.asfreq("A-JUN") == ival_Deoq_to_AJUN
-        assert ival_D_end_of_quarter.asfreq("A-DEC") == ival_Deoq_to_ADEC
+        assert ival_D_end_of_quarter.asfreq("Y-JAN") == ival_Deoq_to_AJAN
+        assert ival_D_end_of_quarter.asfreq("Y-JUN") == ival_Deoq_to_AJUN
+        assert ival_D_end_of_quarter.asfreq("Y-DEC") == ival_Deoq_to_ADEC
 
-        assert ival_D_end_of_year.asfreq("A") == ival_D_to_A
+        assert ival_D_end_of_year.asfreq("Y") == ival_D_to_A
         assert ival_D_end_of_quarter.asfreq("Q") == ival_D_to_QEDEC
         assert ival_D.asfreq("Q-JAN") == ival_D_to_QEJAN
         assert ival_D.asfreq("Q-JUN") == ival_D_to_QEJUN
@@ -523,7 +523,7 @@ def test_conv_hourly(self):
         ival_H_end_of_day = Period(freq="H", year=2007, month=1, day=1, hour=23)
         ival_H_end_of_bus = Period(freq="H", year=2007, month=1, day=1, hour=23)
 
-        ival_H_to_A = Period(freq="A", year=2007)
+        ival_H_to_A = Period(freq="Y", year=2007)
         ival_H_to_Q = Period(freq="Q", year=2007, quarter=1)
         ival_H_to_M = Period(freq="M", year=2007, month=1)
         ival_H_to_W = Period(freq="W", year=2007, month=1, day=7)
@@ -544,8 +544,8 @@ def test_conv_hourly(self):
             freq="s", year=2007, month=1, day=1, hour=0, minute=59, second=59
         )
 
-        assert ival_H.asfreq("A") == ival_H_to_A
-        assert ival_H_end_of_year.asfreq("A") == ival_H_to_A
+        assert ival_H.asfreq("Y") == ival_H_to_A
+        assert ival_H_end_of_year.asfreq("Y") == ival_H_to_A
         assert ival_H.asfreq("Q") == ival_H_to_Q
         assert ival_H_end_of_quarter.asfreq("Q") == ival_H_to_Q
         assert ival_H.asfreq("M") == ival_H_to_M
@@ -591,7 +591,7 @@ def test_conv_minutely(self):
             freq="Min", year=2007, month=1, day=1, hour=0, minute=59
         )
 
-        ival_T_to_A = Period(freq="A", year=2007)
+        ival_T_to_A = Period(freq="Y", year=2007)
         ival_T_to_Q = Period(freq="Q", year=2007, quarter=1)
         ival_T_to_M = Period(freq="M", year=2007, month=1)
         ival_T_to_W = Period(freq="W", year=2007, month=1, day=7)
@@ -607,8 +607,8 @@ def test_conv_minutely(self):
             freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=59
         )
 
-        assert ival_T.asfreq("A") == ival_T_to_A
-        assert ival_T_end_of_year.asfreq("A") == ival_T_to_A
+        assert ival_T.asfreq("Y") == ival_T_to_A
+        assert ival_T_end_of_year.asfreq("Y") == ival_T_to_A
         assert ival_T.asfreq("Q") == ival_T_to_Q
         assert ival_T_end_of_quarter.asfreq("Q") == ival_T_to_Q
         assert ival_T.asfreq("M") == ival_T_to_M
@@ -657,7 +657,7 @@ def test_conv_secondly(self):
             freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=59
         )
 
-        ival_S_to_A = Period(freq="A", year=2007)
+        ival_S_to_A = Period(freq="Y", year=2007)
         ival_S_to_Q = Period(freq="Q", year=2007, quarter=1)
         ival_S_to_M = Period(freq="M", year=2007, month=1)
         ival_S_to_W = Period(freq="W", year=2007, month=1, day=7)
@@ -667,8 +667,8 @@ def test_conv_secondly(self):
         ival_S_to_H = Period(freq="H", year=2007, month=1, day=1, hour=0)
         ival_S_to_T = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
 
-        assert ival_S.asfreq("A") == ival_S_to_A
-        assert ival_S_end_of_year.asfreq("A") == ival_S_to_A
+        assert ival_S.asfreq("Y") == ival_S_to_A
+        assert ival_S_end_of_year.asfreq("Y") == ival_S_to_A
         assert ival_S.asfreq("Q") == ival_S_to_Q
         assert ival_S_end_of_quarter.asfreq("Q") == ival_S_to_Q
         assert ival_S.asfreq("M") == ival_S_to_M
@@ -707,44 +707,44 @@ def test_conv_microsecond(self):
 
     def test_asfreq_mult(self):
         # normal freq to mult freq
-        p = Period(freq="A", year=2007)
+        p = Period(freq="Y", year=2007)
         # ordinal will not change
-        for freq in ["3A", offsets.YearEnd(3)]:
+        for freq in ["3Y", offsets.YearEnd(3)]:
             result = p.asfreq(freq)
-            expected = Period("2007", freq="3A")
+            expected = Period("2007", freq="3Y")
 
             assert result == expected
             assert result.ordinal == expected.ordinal
             assert result.freq == expected.freq
         # ordinal will not change
-        for freq in ["3A", offsets.YearEnd(3)]:
+        for freq in ["3Y", offsets.YearEnd(3)]:
             result = p.asfreq(freq, how="S")
-            expected = Period("2007", freq="3A")
+            expected = Period("2007", freq="3Y")
 
             assert result == expected
             assert result.ordinal == expected.ordinal
             assert result.freq == expected.freq
 
         # mult freq to normal freq
-        p = Period(freq="3A", year=2007)
+        p = Period(freq="3Y", year=2007)
         # ordinal will change because how=E is the default
-        for freq in ["A", offsets.YearEnd()]:
+        for freq in ["Y", offsets.YearEnd()]:
             result = p.asfreq(freq)
-            expected = Period("2009", freq="A")
+            expected = Period("2009", freq="Y")
 
             assert result == expected
             assert result.ordinal == expected.ordinal
             assert result.freq == expected.freq
         # ordinal will not change
-        for freq in ["A", offsets.YearEnd()]:
+        for freq in ["Y", offsets.YearEnd()]:
             result = p.asfreq(freq, how="s")
-            expected = Period("2007", freq="A")
+            expected = Period("2007", freq="Y")
 
             assert result == expected
             assert result.ordinal == expected.ordinal
             assert result.freq == expected.freq
 
-        p = Period(freq="A", year=2007)
+        p = Period(freq="Y", year=2007)
         for freq in ["2M", offsets.MonthEnd(2)]:
             result = p.asfreq(freq)
             expected = Period("2007-12", freq="2M")
@@ -760,7 +760,7 @@ def test_asfreq_mult(self):
             assert result.ordinal == expected.ordinal
             assert result.freq == expected.freq
 
-        p = Period(freq="3A", year=2007)
+        p = Period(freq="3Y", year=2007)
         for freq in ["2M", offsets.MonthEnd(2)]:
             result = p.asfreq(freq)
             expected = Period("2009-12", freq="2M")
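
Illustration, not part of the patch: the multiplied-annual conversions from `test_asfreq_mult` above, under the "Y" spelling. With the default `how="E"` a 3-year period anchored on 2007 maps to the annual period at its end; `how="S"` maps to its start:

```python
import pandas as pd

p = pd.Period(freq="3Y", year=2007)
assert p.asfreq("Y") == pd.Period("2009", freq="Y")           # how="E" (default)
assert p.asfreq("Y", how="S") == pd.Period("2007", freq="Y")  # span start
```
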
Period("2005", freq="A") + i1 = Period("2005", freq="Y") i2 = Period("2005") - i3 = Period("2005", freq="a") + i3 = Period("2005", freq="y") assert i1 == i2 assert i1 == i3 @@ -224,7 +224,7 @@ def test_period_constructor_offsets(self): assert Period("1/1/2005", freq=offsets.MonthEnd()) == Period( "1/1/2005", freq="M" ) - assert Period("2005", freq=offsets.YearEnd()) == Period("2005", freq="A") + assert Period("2005", freq=offsets.YearEnd()) == Period("2005", freq="Y") assert Period("2005", freq=offsets.MonthEnd()) == Period("2005", freq="M") with tm.assert_produces_warning(FutureWarning, match=bday_msg): assert Period("3/10/12", freq=offsets.BusinessDay()) == Period( @@ -315,13 +315,13 @@ def test_invalid_arguments(self): msg = '^Given date string "-2000" not likely a datetime$' with pytest.raises(ValueError, match=msg): - Period("-2000", "A") + Period("-2000", "Y") msg = "day is out of range for month" with pytest.raises(DateParseError, match=msg): - Period("0", "A") + Period("0", "Y") msg = "Unknown datetime string format, unable to parse" with pytest.raises(DateParseError, match=msg): - Period("1/1/-2000", "A") + Period("1/1/-2000", "Y") def test_constructor_corner(self): expected = Period("2007-01", freq="2M") @@ -331,8 +331,8 @@ def test_constructor_corner(self): p = Period("2007-01-01", freq="D") - result = Period(p, freq="A") - exp = Period("2007", freq="A") + result = Period(p, freq="Y") + exp = Period("2007", freq="Y") assert result == exp def test_constructor_infer_freq(self): @@ -360,11 +360,11 @@ def test_constructor_infer_freq(self): assert p.freq == "us" def test_multiples(self): - result1 = Period("1989", freq="2A") - result2 = Period("1989", freq="A") + result1 = Period("1989", freq="2Y") + result2 = Period("1989", freq="Y") assert result1.ordinal == result2.ordinal - assert result1.freqstr == "2A-DEC" - assert result2.freqstr == "A-DEC" + assert result1.freqstr == "2Y-DEC" + assert result2.freqstr == "Y-DEC" assert result1.freq == offsets.YearEnd(2) assert result2.freq == offsets.YearEnd() @@ -390,7 +390,7 @@ def test_period_cons_quarterly(self, month): @pytest.mark.parametrize("month", MONTHS) def test_period_cons_annual(self, month): # bugs in scikits.timeseries - freq = f"A-{month}" + freq = f"Y-{month}" exp = Period("1989", freq=freq) stamp = exp.to_timestamp("D", how="end") + timedelta(days=30) p = Period(stamp, freq=freq) @@ -428,7 +428,7 @@ def test_period_from_ordinal(self): assert p == res assert isinstance(res, Period) - @pytest.mark.parametrize("freq", ["A", "M", "D", "H"]) + @pytest.mark.parametrize("freq", ["Y", "M", "D", "H"]) def test_construct_from_nat_string_and_freq(self, freq): per = Period("NaT", freq=freq) assert per is NaT @@ -621,7 +621,7 @@ def test_to_timestamp_mult(self): "ignore:Period with BDay freq is deprecated:FutureWarning" ) def test_to_timestamp(self): - p = Period("1982", freq="A") + p = Period("1982", freq="Y") start_ts = p.to_timestamp(how="S") aliases = ["s", "StarT", "BEGIn"] for a in aliases: @@ -635,7 +635,7 @@ def test_to_timestamp(self): assert end_ts == p.to_timestamp("D", how=a) assert end_ts == p.to_timestamp("3D", how=a) - from_lst = ["A", "Q", "M", "W", "B", "D", "H", "Min", "s"] + from_lst = ["Y", "Q", "M", "W", "B", "D", "H", "Min", "s"] def _ex(p): if p.freq == "B": @@ -653,7 +653,7 @@ def _ex(p): # Frequency other than daily - p = Period("1985", freq="A") + p = Period("1985", freq="Y") result = p.to_timestamp("H", how="end") expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns") @@ -732,7 +732,7 @@ def 
test_to_timestamp_microsecond(self, ts, expected, freq): ("2000-12-15 13:45:26", "s", "2000-12-15 13:45:26", "s"), ("2000-12-15 13:45:26", "min", "2000-12-15 13:45", "min"), ("2000-12-15 13:45:26", "H", "2000-12-15 13:00", "H"), - ("2000-12-15", "Y", "2000", "A-DEC"), + ("2000-12-15", "Y", "2000", "Y-DEC"), ("2000-12-15", "Q", "2000Q4", "Q-DEC"), ("2000-12-15", "M", "2000-12", "M"), ("2000-12-15", "W", "2000-12-11/2000-12-17", "W-SUN"), @@ -763,7 +763,7 @@ def test_strftime(self): class TestPeriodProperties: """Test properties such as year, month, weekday, etc....""" - @pytest.mark.parametrize("freq", ["A", "M", "D", "H"]) + @pytest.mark.parametrize("freq", ["Y", "M", "D", "H"]) def test_is_leap_year(self, freq): # GH 13727 p = Period("2000-01-01 00:00:00", freq=freq) @@ -861,7 +861,7 @@ def test_inner_bounds_start_and_end_time(self, bound, offset, period_property): assert getattr(period, period_property).floor("s") == expected def test_start_time(self): - freq_lst = ["A", "Q", "M", "D", "H", "min", "s"] + freq_lst = ["Y", "Q", "M", "D", "H", "min", "s"] xp = datetime(2012, 1, 1) for f in freq_lst: p = Period("2012", freq=f) @@ -871,7 +871,7 @@ def test_start_time(self): assert Period("2012", freq="W").start_time == datetime(2011, 12, 26) def test_end_time(self): - p = Period("2012", freq="A") + p = Period("2012", freq="Y") def _ex(*args): return Timestamp(Timestamp(datetime(*args)).as_unit("ns")._value - 1) @@ -936,7 +936,7 @@ def _ex(*args): def test_properties_annually(self): # Test properties on Periods with annually frequency. - a_date = Period(freq="A", year=2007) + a_date = Period(freq="Y", year=2007) assert a_date.year == 2007 def test_properties_quarterly(self): @@ -1196,11 +1196,11 @@ def test_add_sub_td64_nat(self, unit): nat - per def test_sub_delta(self): - left, right = Period("2011", freq="A"), Period("2007", freq="A") + left, right = Period("2011", freq="Y"), Period("2007", freq="Y") result = left - right assert result == 4 * right.freq - msg = r"Input has different freq=M from Period\(freq=A-DEC\)" + msg = r"Input has different freq=M from Period\(freq=Y-DEC\)" with pytest.raises(IncompatibleFrequency, match=msg): left - Period("2007-01", freq="M") @@ -1316,7 +1316,7 @@ def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize): def test_add_offset(self): # freq is DateOffset - for freq in ["A", "2A", "3A"]: + for freq in ["Y", "2Y", "3Y"]: p = Period("2011", freq=freq) exp = Period("2013", freq=freq) assert p + offsets.YearEnd(2) == exp @@ -1467,7 +1467,7 @@ def test_sub_offset(self): ] ) - for freq in ["A", "2A", "3A"]: + for freq in ["Y", "2Y", "3Y"]: p = Period("2011", freq=freq) assert p - offsets.YearEnd(2) == Period("2009", freq=freq) @@ -1589,7 +1589,7 @@ def test_small_year_parsing(): def test_negone_ordinals(): - freqs = ["A", "M", "Q", "D", "H", "min", "s"] + freqs = ["Y", "M", "Q", "D", "H", "min", "s"] period = Period(ordinal=-1, freq="D") for freq in freqs: diff --git a/pandas/tests/series/methods/test_align.py b/pandas/tests/series/methods/test_align.py index e1b3dd4888ef5..2091549b4c3c1 100644 --- a/pandas/tests/series/methods/test_align.py +++ b/pandas/tests/series/methods/test_align.py @@ -204,7 +204,7 @@ def test_align_dt64tzindex_mismatched_tzs(): def test_align_periodindex(join_type): - rng = period_range("1/1/2000", "1/1/2010", freq="A") + rng = period_range("1/1/2000", "1/1/2010", freq="Y") ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng) # TODO: assert something? 
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 8547fd6988791..c8f94632bc25f 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -153,7 +153,7 @@ class TestSeriesArithmetic: # Some of these may end up in tests/arithmetic, but are not yet sorted def test_add_series_with_period_index(self): - rng = pd.period_range("1/1/2000", "1/1/2010", freq="A") + rng = pd.period_range("1/1/2000", "1/1/2010", freq="Y") ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng) result = ts + ts[::2] @@ -164,7 +164,7 @@ def test_add_series_with_period_index(self): result = ts + _permute(ts[::2]) tm.assert_series_equal(result, expected) - msg = "Input has different freq=D from Period\\(freq=A-DEC\\)" + msg = "Input has different freq=D from Period\\(freq=Y-DEC\\)" with pytest.raises(IncompatibleFrequency, match=msg): ts + ts.asfreq("D", how="end") diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index d45c655a4c0a2..5a05a1840b644 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1306,7 +1306,7 @@ def test_construct_from_ints_including_iNaT_scalar_period_dtype(self): assert isna(series[2]) def test_constructor_period_incompatible_frequency(self): - data = [Period("2000", "D"), Period("2001", "A")] + data = [Period("2000", "D"), Period("2001", "Y")] result = Series(data) assert result.dtype == object assert result.tolist() == data diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index f294885fb8f4d..d088482eb05e3 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -247,7 +247,7 @@ def test_index_repr_in_frame_with_nan(self): assert repr(s) == exp def test_format_pre_1900_dates(self): - rng = date_range("1/1/1850", "1/1/1950", freq="A-DEC") + rng = date_range("1/1/1850", "1/1/1950", freq="Y-DEC") rng.format() ts = Series(1, index=rng) repr(ts) diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py index 417bf0e90201b..ca8db972e2185 100644 --- a/pandas/tests/tseries/frequencies/test_freq_code.py +++ b/pandas/tests/tseries/frequencies/test_freq_code.py @@ -27,7 +27,7 @@ def test_get_to_timestamp_base(freqstr, exp_freqstr): @pytest.mark.parametrize( "freqstr,expected", [ - ("A", "year"), + ("Y", "year"), ("Q", "quarter"), ("M", "month"), ("D", "day"), @@ -99,9 +99,9 @@ def test_compatibility(freqstr, expected): assert ts_np + do == np.datetime64(expected) -@pytest.mark.parametrize("freq", ["T", "S", "L", "N", "U"]) -def test_units_t_l_deprecated_from__attrname_to_abbrevs(freq): - # GH 52536 +@pytest.mark.parametrize("freq", ["A", "T", "S", "L", "U", "N"]) +def test_units_A_T_S_L_U_N_deprecated_from_attrname_to_abbrevs(freq): + # GH#52536 msg = f"'{freq}' is deprecated and will be removed in a future version." 
with tm.assert_produces_warning(FutureWarning, match=msg): diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py index 82cceeac2cd25..0b2978389ea88 100644 --- a/pandas/tests/tseries/frequencies/test_inference.py +++ b/pandas/tests/tseries/frequencies/test_inference.py @@ -52,7 +52,7 @@ def base_delta_code_pair(request): freqs = ( [f"Q-{month}" for month in MONTHS] - + [f"{annual}-{month}" for annual in ["A", "BA"] for month in MONTHS] + + [f"{annual}-{month}" for annual in ["Y", "BA"] for month in MONTHS] + ["ME", "BM", "BMS"] + [f"WOM-{count}{day}" for count in range(1, 5) for day in DAYS] + [f"W-{day}" for day in DAYS] @@ -167,7 +167,7 @@ def test_monthly_ambiguous(): def test_annual_ambiguous(): rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"]) - assert rng.inferred_freq == "A-JAN" + assert rng.inferred_freq == "Y-JAN" @pytest.mark.parametrize("count", range(1, 5)) @@ -359,7 +359,7 @@ def test_not_monotonic(): rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"]) rng = rng[::-1] - assert rng.inferred_freq == "-1A-JAN" + assert rng.inferred_freq == "-1Y-JAN" def test_non_datetime_index2(): @@ -479,18 +479,18 @@ def test_series_datetime_index(freq): "Q@JAN", "Q@FEB", "Q@MAR", - "A@JAN", - "A@FEB", - "A@MAR", - "A@APR", - "A@MAY", - "A@JUN", - "A@JUL", - "A@AUG", - "A@SEP", - "A@OCT", - "A@NOV", - "A@DEC", + "Y@JAN", + "Y@FEB", + "Y@MAR", + "Y@APR", + "Y@MAY", + "Y@JUN", + "Y@JUL", + "Y@AUG", + "Y@SEP", + "Y@OCT", + "Y@NOV", + "Y@DEC", "Y@JAN", "WOM@1MON", "WOM@2MON", diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index a7e9854c38f18..de44cf3f94d26 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -839,7 +839,7 @@ def test_rule_code(self): "NOV", "DEC", ] - base_lst = ["A", "AS", "BA", "BAS", "Q", "QS", "BQ", "BQS"] + base_lst = ["Y", "AS", "BA", "BAS", "Q", "QS", "BQ", "BQS"] for base in base_lst: for v in suffix_lst: alias = "-".join([base, v]) @@ -858,7 +858,7 @@ def test_freq_offsets(): class TestReprNames: def test_str_for_named_is_name(self): # look at all the amazing combinations! 
- month_prefixes = ["A", "AS", "BA", "BAS", "Q", "BQ", "BQS", "QS"] + month_prefixes = ["Y", "AS", "BA", "BAS", "Q", "BQ", "BQS", "QS"] names = [ prefix + "-" + month for prefix in month_prefixes diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py index c1ab0ba0b5e6f..d0f8923f3ad89 100644 --- a/pandas/tests/tslibs/test_conversion.py +++ b/pandas/tests/tslibs/test_conversion.py @@ -73,7 +73,7 @@ def test_tz_convert_single_matches_tz_convert_hourly(tz_aware_fixture): _compare_local_to_utc(tz_didx, naive_didx) -@pytest.mark.parametrize("freq", ["D", "A"]) +@pytest.mark.parametrize("freq", ["D", "Y"]) def test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq): tz = tz_aware_fixture tz_didx = date_range("2018-01-01", "2020-01-01", freq=freq, tz=tz) diff --git a/pandas/tests/tslibs/test_libfrequencies.py b/pandas/tests/tslibs/test_libfrequencies.py index 83f28f6b5dc01..effd3b4b8b4e5 100644 --- a/pandas/tests/tslibs/test_libfrequencies.py +++ b/pandas/tests/tslibs/test_libfrequencies.py @@ -16,10 +16,8 @@ (offsets.QuarterEnd(startingMonth=12).freqstr, "DEC"), ("Q-JAN", "JAN"), (offsets.QuarterEnd(startingMonth=1).freqstr, "JAN"), - ("A-DEC", "DEC"), ("Y-DEC", "DEC"), (offsets.YearEnd().freqstr, "DEC"), - ("A-MAY", "MAY"), ("Y-MAY", "MAY"), (offsets.YearEnd(month=5).freqstr, "MAY"), ], diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py index ec3579109e7a4..425decc14251a 100644 --- a/pandas/tests/tslibs/test_parsing.py +++ b/pandas/tests/tslibs/test_parsing.py @@ -138,8 +138,8 @@ def test_parsers_quarterly_with_freq_error(date_str, kwargs, msg): "date_str,freq,expected", [ ("2013Q2", None, datetime(2013, 4, 1)), - ("2013Q2", "A-APR", datetime(2012, 8, 1)), - ("2013-Q2", "A-DEC", datetime(2013, 4, 1)), + ("2013Q2", "Y-APR", datetime(2012, 8, 1)), + ("2013-Q2", "Y-DEC", datetime(2013, 4, 1)), ], ) def test_parsers_quarterly_with_freq(date_str, freq, expected): @@ -148,7 +148,7 @@ def test_parsers_quarterly_with_freq(date_str, freq, expected): @pytest.mark.parametrize( - "date_str", ["2Q 2005", "2Q-200A", "2Q-200", "22Q2005", "2Q200.", "6Q-20"] + "date_str", ["2Q 2005", "2Q-200Y", "2Q-200", "22Q2005", "2Q200.", "6Q-20"] ) def test_parsers_quarter_invalid(date_str): if date_str == "6Q-20": diff --git a/pandas/tests/tslibs/test_period_asfreq.py b/pandas/tests/tslibs/test_period_asfreq.py index 99f0a82d6711e..ca207e1031653 100644 --- a/pandas/tests/tslibs/test_period_asfreq.py +++ b/pandas/tests/tslibs/test_period_asfreq.py @@ -54,7 +54,7 @@ def test_intra_day_conversion_factors(freq1, freq2, expected): @pytest.mark.parametrize( - "freq,expected", [("A", 0), ("M", 0), ("W", 1), ("D", 0), ("B", 0)] + "freq,expected", [("Y", 0), ("M", 0), ("W", 1), ("D", 0), ("B", 0)] ) def test_period_ordinal_start_values(freq, expected): # information for Jan. 1, 1970. 
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 7aa245341cbdd..e77f56a9928ae 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -66,7 +66,7 @@ key = f"{_prefix}-{_m}" OFFSET_TO_PERIOD_FREQSTR[key] = OFFSET_TO_PERIOD_FREQSTR[_prefix] -for _prefix in ["A", "Q"]: +for _prefix in ["Y", "Q"]: for _m in MONTHS: _alias = f"{_prefix}-{_m}" OFFSET_TO_PERIOD_FREQSTR[_alias] = _alias @@ -345,7 +345,7 @@ def _get_annual_rule(self) -> str | None: if pos_check is None: return None else: - return {"cs": "AS", "bs": "BAS", "ce": "A", "be": "BA"}.get(pos_check) + return {"cs": "AS", "bs": "BAS", "ce": "Y", "be": "BA"}.get(pos_check) def _get_quarterly_rule(self) -> str | None: if len(self.mdiffs) > 1: @@ -574,7 +574,7 @@ def _quarter_months_conform(source: str, target: str) -> bool: def _is_annual(rule: str) -> bool: rule = rule.upper() - return rule == "A" or rule.startswith("A-") + return rule == "Y" or rule.startswith("Y-") def _is_quarterly(rule: str) -> bool:
xref #54275, #54061: deprecated the string `'A'` for yearly frequency and `YearEnd` in favour of `'Y'`. EDIT: also deprecated annual frequencies with the various fiscal year ends, `"A-DEC"`, `"A-JAN"`, etc., in favour of `"Y-DEC"`, `"Y-JAN"`, etc.
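For reference, a minimal sketch of the user-facing change this PR works toward (assuming a pandas build that includes the deprecation):

```python
import pandas as pd

# "Y" is the documented spelling for yearly frequency
p = pd.Period("2005", freq="Y")
print(p.freqstr)  # Y-DEC

# The old "A" alias (and "A-DEC", "A-JAN", ...) keeps working for now,
# but is expected to emit a FutureWarning pointing users at "Y"
pd.Period("2005", freq="A")
```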
https://api.github.com/repos/pandas-dev/pandas/pulls/55252
2023-09-22T22:31:44Z
2023-10-06T09:40:26Z
2023-10-06T09:40:26Z
2023-10-06T15:09:10Z
Backport PR #55249 on branch 2.1.x (BUG: incompatible dtype when creating string column with loc)
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 2c9b10160d144..97aeb56924e65 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -13,7 +13,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ -- +- Fixed bug where PDEP-6 warning about setting an item of an incompatible dtype was being shown when creating a new conditional column (:issue:`55025`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 7117e34b23ca4..8760c8eeca454 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -624,6 +624,9 @@ def infer_fill_value(val): return np.array("NaT", dtype=DT64NS_DTYPE) elif dtype in ["timedelta", "timedelta64"]: return np.array("NaT", dtype=TD64NS_DTYPE) + return np.array(np.nan, dtype=object) + elif val.dtype.kind == "U": + return np.array(np.nan, dtype=val.dtype) return np.nan diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index b324291bab31e..370cbf0f33174 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1890,6 +1890,21 @@ def test_setitem_dict_and_set_disallowed_multiindex(self, key): df.loc[key] = 1 +def test_adding_new_conditional_column() -> None: + # https://github.com/pandas-dev/pandas/issues/55025 + df = DataFrame({"x": [1]}) + df.loc[df["x"] == 1, "y"] = "1" + expected = DataFrame({"x": [1], "y": ["1"]}) + tm.assert_frame_equal(df, expected) + + df = DataFrame({"x": [1]}) + # try inserting something which numpy would store as 'object' + value = lambda x: x + df.loc[df["x"] == 1, "y"] = value + expected = DataFrame({"x": [1], "y": [value]}) + tm.assert_frame_equal(df, expected) + + class TestSetitemValidation: # This is adapted from pandas/tests/arrays/masked/test_indexing.py # but checks for warnings instead of errors. diff --git a/pandas/tests/frame/indexing/test_set_value.py b/pandas/tests/frame/indexing/test_set_value.py index cd9ffa0f129a2..32312868adacb 100644 --- a/pandas/tests/frame/indexing/test_set_value.py +++ b/pandas/tests/frame/indexing/test_set_value.py @@ -26,10 +26,7 @@ def test_set_value_resize(self, float_frame): assert float_frame._get_value("foobar", "qux") == 0 res = float_frame.copy() - with tm.assert_produces_warning( - FutureWarning, match="Setting an item of incompatible dtype" - ): - res._set_value("foobar", "baz", "sam") + res._set_value("foobar", "baz", "sam") assert res["baz"].dtype == np.object_ res = float_frame.copy()
Backport PR #55249: BUG: incompatible dtype when creating string column with loc
https://api.github.com/repos/pandas-dev/pandas/pulls/55251
2023-09-22T19:56:16Z
2023-09-22T23:47:53Z
2023-09-22T23:47:53Z
2023-09-22T23:47:53Z
BUG: incompatible dtype when creating string column with loc
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 2c9b10160d144..97aeb56924e65 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -13,7 +13,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ -- +- Fixed bug where PDEP-6 warning about setting an item of an incompatible dtype was being shown when creating a new conditional column (:issue:`55025`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 7117e34b23ca4..8760c8eeca454 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -624,6 +624,9 @@ def infer_fill_value(val): return np.array("NaT", dtype=DT64NS_DTYPE) elif dtype in ["timedelta", "timedelta64"]: return np.array("NaT", dtype=TD64NS_DTYPE) + return np.array(np.nan, dtype=object) + elif val.dtype.kind == "U": + return np.array(np.nan, dtype=val.dtype) return np.nan diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index b324291bab31e..370cbf0f33174 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1890,6 +1890,21 @@ def test_setitem_dict_and_set_disallowed_multiindex(self, key): df.loc[key] = 1 +def test_adding_new_conditional_column() -> None: + # https://github.com/pandas-dev/pandas/issues/55025 + df = DataFrame({"x": [1]}) + df.loc[df["x"] == 1, "y"] = "1" + expected = DataFrame({"x": [1], "y": ["1"]}) + tm.assert_frame_equal(df, expected) + + df = DataFrame({"x": [1]}) + # try inserting something which numpy would store as 'object' + value = lambda x: x + df.loc[df["x"] == 1, "y"] = value + expected = DataFrame({"x": [1], "y": [value]}) + tm.assert_frame_equal(df, expected) + + class TestSetitemValidation: # This is adapted from pandas/tests/arrays/masked/test_indexing.py # but checks for warnings instead of errors. diff --git a/pandas/tests/frame/indexing/test_set_value.py b/pandas/tests/frame/indexing/test_set_value.py index cd9ffa0f129a2..32312868adacb 100644 --- a/pandas/tests/frame/indexing/test_set_value.py +++ b/pandas/tests/frame/indexing/test_set_value.py @@ -26,10 +26,7 @@ def test_set_value_resize(self, float_frame): assert float_frame._get_value("foobar", "qux") == 0 res = float_frame.copy() - with tm.assert_produces_warning( - FutureWarning, match="Setting an item of incompatible dtype" - ): - res._set_value("foobar", "baz", "sam") + res._set_value("foobar", "baz", "sam") assert res["baz"].dtype == np.object_ res = float_frame.copy()
- [ ] closes #55025 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
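For reference, a minimal reproduction of the fixed behavior, based on the test added in this diff:

```python
import pandas as pd

df = pd.DataFrame({"x": [1]})
# Creating a brand-new column through a boolean mask previously emitted the
# PDEP-6 "incompatible dtype" FutureWarning; with this fix it assigns cleanly.
df.loc[df["x"] == 1, "y"] = "1"
print(df)
#    x  y
# 0  1  1
```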
https://api.github.com/repos/pandas-dev/pandas/pulls/55249
2023-09-22T16:36:52Z
2023-09-22T19:55:11Z
2023-09-22T19:55:11Z
2023-09-22T19:55:18Z
BUG: incompatible dtype when creating string column with loc
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 2c9b10160d144..97aeb56924e65 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -13,7 +13,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ -- +- Fixed bug where PDEP-6 warning about setting an item of an incompatible dtype was being shown when creating a new conditional column (:issue:`55025`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 871e5817fdf0d..572b668842b9d 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -49,7 +49,6 @@ ABCSeries, ) from pandas.core.dtypes.missing import ( - infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype, @@ -1841,8 +1840,12 @@ def _setitem_with_indexer(self, indexer, value, name: str = "iloc"): self.obj[key] = empty_value else: - # FIXME: GH#42099#issuecomment-864326014 - self.obj[key] = infer_fill_value(value) + # avoid circular import + from pandas.core.series import Series + + new = Series(index=self.obj.index) + new[key] = value + self.obj[key] = new new_indexer = convert_from_missing_indexer_tuple( indexer, self.obj.axes diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index b324291bab31e..a2a88a48cc18c 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1890,6 +1890,14 @@ def test_setitem_dict_and_set_disallowed_multiindex(self, key): df.loc[key] = 1 +def test_adding_new_conditional_column() -> None: + # https://github.com/pandas-dev/pandas/issues/55025 + df = DataFrame({"x": [1]}) + df.loc[df["x"] == 1, "y"] = "1" + expected = DataFrame({"x": [1], "y": ["1"]}) + tm.assert_frame_equal(df, expected) + + class TestSetitemValidation: # This is adapted from pandas/tests/arrays/masked/test_indexing.py # but checks for warnings instead of errors.
- [ ] closes #55025 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Sorry, this makes no sense; will get back to it.
https://api.github.com/repos/pandas-dev/pandas/pulls/55248
2023-09-22T16:17:46Z
2023-09-22T16:19:06Z
null
2023-09-22T16:19:29Z
Create broken-linkcheck.yml
diff --git a/.github/workflows/broken-linkcheck.yml b/.github/workflows/broken-linkcheck.yml new file mode 100644 index 0000000000000..10ab5b08a4437 --- /dev/null +++ b/.github/workflows/broken-linkcheck.yml @@ -0,0 +1,38 @@ +name: Linkcheck +on: + schedule: + # Run monthly on the 1st day of the month + - cron: '0 0 1 * *' + pull_request: + paths: + - ".github/workflows/broken-linkcheck.yml" + - "doc/make.py" +jobs: + linkcheck: + runs-on: ubuntu-latest + defaults: + run: + shell: bash -el {0} + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Conda + uses: ./.github/actions/setup-conda + + - name: Build Pandas + uses: ./.github/actions/build_pandas + + - name: Run linkcheck script + working-directory: ./doc + run: | + set -o pipefail + python make.py linkcheck | tee linkcheck.txt + + - name: Display broken links + if: failure() + working-directory: ./doc + run: grep broken linkcheck.txt diff --git a/doc/source/conf.py b/doc/source/conf.py index 86d2494707ce2..6b52b52ce5e13 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -14,6 +14,7 @@ import inspect import logging import os +import re import sys import warnings @@ -798,3 +799,49 @@ def setup(app): app.add_autodocumenter(AccessorMethodDocumenter) app.add_autodocumenter(AccessorCallableDocumenter) app.add_directive("autosummary", PandasAutosummary) + + +# Ignore list for broken links,found in CI run checks for broken-linkcheck.yml + +linkcheck_ignore = [ + "^http://$", + "^https://$", + *[ + re.escape(link) + for link in [ + "http://scatterci.github.io/pydata/pandas", + "http://specs.frictionlessdata.io/json-table-schema/", + "https://cloud.google.com/bigquery/docs/access-control#roles", + "https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query", + "https://crates.io/crates/calamine", + "https://devguide.python.org/setup/#macos", + "https://en.wikipedia.org/wiki/Imputation_statistics", + "https://en.wikipedia.org/wiki/Imputation_(statistics", + "https://github.com/noatamir/pandas-dev", + "https://github.com/pandas-dev/pandas/blob/main/pandas/plotting/__init__.py#L1", + "https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/generic.py#L568", + "https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/frame.py#L1495", + "https://github.com/pandas-dev/pandas/issues/174151", + "https://gitpod.io/#https://github.com/USERNAME/pandas", + "https://manishamde.github.io/blog/2013/03/07/pandas-and-python-top-10/", + "https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table", + "https://nipunbatra.github.io/blog/visualisation/2013/05/01/aggregation-timeseries.html", + "https://nbviewer.ipython.org/gist/metakermit/5720498", + "https://numpy.org/doc/stable/user/basics.byteswapping.html", + "https://pandas-gbq.readthedocs.io/en/latest/changelog.html#changelog-0-8-0", + "https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking", + "https://pandas.pydata.org/pandas-docs/stable/ecosystem.html", + "https://sqlalchemy.readthedocs.io/en/latest/dialects/index.html", + "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000245912.htm", + "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000214639.htm", + "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a002283942.htm", + "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000245965.htm", + 
"https://support.sas.com/documentation/cdl/en/imlug/66845/HTML/default/viewer.htm#imlug_langref_sect455.htm", + "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a002284668.htm", + "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a002978282.htm", + "https://wesmckinney.com/blog/update-on-upcoming-pandas-v0-10-new-file-parser-other-performance-wins/", + "https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2022", + "pandas.zip", + ] + ], +]
Created a GitHub Action to run the Sphinx linkcheck monthly. - [x] closes #45409 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55246
2023-09-22T14:50:51Z
2023-11-07T21:45:29Z
2023-11-07T21:45:29Z
2023-12-05T18:29:39Z
BUG: Rolling pd.date_range incorrect for unit='s' #55026
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index becbba703f92c..2d5fe7b8112ea 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -1884,6 +1884,13 @@ def _validate(self): else: self._win_freq_i8 = freq.nanos + if self._on.dtype == "M8[us]": + self._win_freq_i8 /= 1e3 + elif self._on.dtype == "M8[ms]": + self._win_freq_i8 /= 1e6 + elif self._on.dtype == "M8[s]": + self._win_freq_i8 /= 1e9 + # min_periods must be an integer if self.min_periods is None: self.min_periods = 1 diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 3fe922539780d..9eb2d544392bb 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -1950,3 +1950,25 @@ def test_numeric_only_corr_cov_series(kernel, use_arg, numeric_only, dtype): op2 = getattr(rolling2, kernel) expected = op2(*arg2, numeric_only=numeric_only) tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) +def test_rolling_rolling_max_window_nanoseconds_confilict_timestamp(unit): + # GH#55026 + window = Timedelta(days=4) + + ref_dates = date_range("2023-01-01", "2023-01-10", unit="ns") + ref_series = Series(0, index=ref_dates) + ref_series.iloc[0] = 1 + ref_max_series = ref_series.rolling(window).max() + + dates = date_range("2023-01-01", "2023-01-10", unit=unit) + series = Series(0, index=dates) + series.iloc[0] = 1 + max_series = series.rolling(window).max() + + ref_df = DataFrame(ref_max_series) + df = DataFrame(max_series) + df.index = df.index.as_unit("ns") + + tm.assert_frame_equal(ref_df, df)
- [X] closes #55026 - [X] [Tests added and passed] This follows #55173 and fixes the issue with the same approach. Custom tests were added for the `max` function itself.
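For reference, a reproduction sketch distilled from the test added in this diff:

```python
import pandas as pd

# GH#55026: the Timedelta window was converted to nanoseconds while a
# non-nanosecond index stayed in its own unit, so for unit="s" the
# effective window was about 1e9 times too wide.
dates = pd.date_range("2023-01-01", "2023-01-10", unit="s")
s = pd.Series(0.0, index=dates)
s.iloc[0] = 1.0
print(s.rolling(pd.Timedelta(days=4)).max())
```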
https://api.github.com/repos/pandas-dev/pandas/pulls/55243
2023-09-22T12:38:42Z
2023-09-22T16:48:49Z
null
2023-09-22T19:48:27Z
PERF: Add type-hints in tzconversion.pyx
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 8eab623a2b5f7..ebca1605be8d5 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -238,7 +238,7 @@ Performance improvements - Performance improvement in :meth:`DataFrame.sort_index` and :meth:`Series.sort_index` when indexed by a :class:`MultiIndex` (:issue:`54835`) - Performance improvement in :meth:`Index.difference` (:issue:`55108`) - Performance improvement when indexing with more than 4 keys (:issue:`54550`) -- +- Performance improvement when localizing time to UTC (:issue:`55241`) .. --------------------------------------------------------------------------- .. _whatsnew_220.bug_fixes: diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index e17d264333264..a96779aa33255 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -462,7 +462,7 @@ cdef str _render_tstamp(int64_t val, NPY_DATETIMEUNIT creso): cdef _get_utc_bounds( - ndarray vals, + ndarray[int64_t] vals, int64_t* tdata, Py_ssize_t ntrans, const int64_t[::1] deltas, @@ -472,7 +472,7 @@ cdef _get_utc_bounds( # result_a) or right of the DST transition (store in result_b) cdef: - ndarray result_a, result_b + ndarray[int64_t] result_a, result_b Py_ssize_t i, n = vals.size int64_t val, v_left, v_right Py_ssize_t isl, isr, pos_left, pos_right
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Part of #55179. With this, I'm seeing time plummet in the benchmark tslibs.tz_convert.TimeTZConvert.time_tz_localize_to_utc(1000000, tzfile('/usr/share/zoneinfo/Asia/Tokyo')) from 159ms (Cython 0.29) and 271ms (Cython 3.0.2) to ~5-6ms on both.
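A rough way to check the speedup locally; this is only a sketch, not the asv harness, and the index size and `freq` are stand-ins for the benchmark parameters:

```python
import timeit

import pandas as pd

# Localizing many naive timestamps goes through tz_localize_to_utc, which
# calls the _get_utc_bounds routine typed in this patch.
idx = pd.date_range("2000-01-01", periods=1_000_000, freq="min")
print(timeit.timeit(lambda: idx.tz_localize("Asia/Tokyo"), number=5))
```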
https://api.github.com/repos/pandas-dev/pandas/pulls/55241
2023-09-22T11:50:25Z
2023-09-22T14:59:39Z
2023-09-22T14:59:39Z
2023-09-24T15:29:44Z
DOC: point out that explicitly passing axis=None is deprecated for sum, prod, var, std, sem
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c75c6c546aad0..d0fab0037d99d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -12532,6 +12532,39 @@ def last_valid_index(self) -> Hashable | None: {examples} """ +_sum_prod_doc = """ +{desc} + +Parameters +---------- +axis : {axis_descr} + Axis for the function to be applied on. + For `Series` this parameter is unused and defaults to 0. + + .. warning:: + + The behavior of DataFrame.{name} with ``axis=None`` is deprecated, + in a future version this will reduce over both axes and return a scalar + To retain the old behavior, pass axis=0 (or do not pass axis). + + .. versionadded:: 2.0.0 + +skipna : bool, default True + Exclude NA/null values when computing the result. +numeric_only : bool, default False + Include only float, int, boolean columns. Not implemented for Series. + +{min_count}\ +**kwargs + Additional keyword arguments to be passed to the function. + +Returns +------- +{name1} or scalar\ +{see_also}\ +{examples} +""" + _num_ddof_doc = """ {desc} @@ -12539,6 +12572,13 @@ def last_valid_index(self) -> Hashable | None: ---------- axis : {axis_descr} For `Series` this parameter is unused and defaults to 0. + + .. warning:: + + The behavior of DataFrame.{name} with ``axis=None`` is deprecated, + in a future version this will reduce over both axes and return a scalar + To retain the old behavior, pass axis=0 (or do not pass axis). + skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. @@ -13246,7 +13286,7 @@ def make_doc(name: str, ndim: int) -> str: kwargs = {"min_count": ""} elif name == "sum": - base_doc = _num_doc + base_doc = _sum_prod_doc desc = ( "Return the sum of the values over the requested axis.\n\n" "This is equivalent to the method ``numpy.sum``." @@ -13256,7 +13296,7 @@ def make_doc(name: str, ndim: int) -> str: kwargs = {"min_count": _min_count_stub} elif name == "prod": - base_doc = _num_doc + base_doc = _sum_prod_doc desc = "Return the product of the values over the requested axis." see_also = _stat_func_see_also examples = _prod_examples @@ -13540,6 +13580,7 @@ def make_doc(name: str, ndim: int) -> str: docstr = base_doc.format( desc=desc, + name=name, name1=name1, name2=name2, axis_descr=axis_descr,
- [x] closes #54547 Pointed out in the documentation of the methods `sum`, `prod`, `var`, `std`, and `sem` that explicitly passing `axis=None` is deprecated.
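The deprecation being documented, from the user's perspective:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

df.sum()           # column-wise, same as the default axis=0
df.sum(axis=None)  # FutureWarning: in a future version this will reduce
                   # over both axes and return a scalar; pass axis=0 (or
                   # omit axis) to retain the old behavior
```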
https://api.github.com/repos/pandas-dev/pandas/pulls/55240
2023-09-22T11:11:57Z
2023-09-22T16:50:52Z
2023-09-22T16:50:52Z
2023-09-22T16:50:58Z
DOC: Fix doc for first_valid_index
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d0fab0037d99d..427687d9614f9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -12466,11 +12466,6 @@ def first_valid_index(self) -> Hashable | None: ------- type of index - Notes - ----- - If all elements are non-NA/null, returns None. - Also returns None for empty {klass}. - Examples -------- For Series: @@ -12481,6 +12476,22 @@ def first_valid_index(self) -> Hashable | None: >>> s.last_valid_index() 2 + >>> s = pd.Series([None, None]) + >>> print(s.first_valid_index()) + None + >>> print(s.last_valid_index()) + None + + If all elements in Series are NA/null, returns None. + + >>> s = pd.Series() + >>> print(s.first_valid_index()) + None + >>> print(s.last_valid_index()) + None + + If Series is empty, returns None. + For DataFrame: >>> df = pd.DataFrame({{'A': [None, None, 2], 'B': [None, 3, 4]}}) @@ -12493,6 +12504,31 @@ def first_valid_index(self) -> Hashable | None: 1 >>> df.last_valid_index() 2 + + >>> df = pd.DataFrame({{'A': [None, None, None], 'B': [None, None, None]}}) + >>> df + A B + 0 None None + 1 None None + 2 None None + >>> print(df.first_valid_index()) + None + >>> print(df.last_valid_index()) + None + + If all elements in DataFrame are NA/null, returns None. + + >>> df = pd.DataFrame() + >>> df + Empty DataFrame + Columns: [] + Index: [] + >>> print(df.first_valid_index()) + None + >>> print(df.last_valid_index()) + None + + If DataFrame is empty, returns None. """ return self._find_valid_index(how="first")
- [x] closes #55187 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55236
2023-09-22T02:00:41Z
2023-09-22T23:48:53Z
2023-09-22T23:48:53Z
2023-09-22T23:53:17Z
DEPR: Change FutureWarning for observed=False to DeprecationWarning
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 2c9b10160d144..bbf7368513fd4 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -29,7 +29,7 @@ Bug fixes Other ~~~~~ -- +- Changed the ``FutureWarning`` raised when ``observed=False`` in :meth:`DataFrame.groupby` and :meth:`Series.groupby` to a ``DeprecationWarning`` (:issue:`54970`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index d607baf18d6cb..942358b6db3de 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1322,7 +1322,7 @@ def __init__( "to True in a future version of pandas. Pass observed=False to " "retain current behavior or observed=True to adopt the future " "default and silence this warning.", - FutureWarning, + DeprecationWarning, stacklevel=find_stack_level(), ) observed = False diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index b11240c841420..cb083eed4c2fc 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -2080,7 +2080,7 @@ def test_groupby_default_depr(cat_columns, keys): df = DataFrame({"a": [1, 1, 2, 3], "b": [4, 5, 6, 7]}) df[cat_columns] = df[cat_columns].astype("category") msg = "The default of observed=False is deprecated" - klass = FutureWarning if set(cat_columns) & set(keys) else None + klass = DeprecationWarning if set(cat_columns) & set(keys) else None with tm.assert_produces_warning(klass, match=msg): df.groupby(keys)
- [ ] closes #54970 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
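A minimal sketch of the change; the groupby result is unchanged, only the warning category differs:

```python
import warnings

import pandas as pd

df = pd.DataFrame(
    {"key": pd.Categorical(["a", "a"], categories=["a", "b"]), "val": [1, 2]}
)

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    df.groupby("key").sum()  # `observed` not passed
# After this PR the "observed=False is deprecated" message comes through as
# a DeprecationWarning rather than a FutureWarning.
print([warning.category for warning in w])
```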
https://api.github.com/repos/pandas-dev/pandas/pulls/55235
2023-09-22T01:22:05Z
2023-10-01T02:40:13Z
null
2023-10-01T02:40:15Z
BUG/ENH: Use pyarrow grouped aggregation functions for pyarrow-backed groupby ops
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 4567b5b414301..3743ec4b935cc 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -3,9 +3,11 @@ from string import ascii_letters import numpy as np +import pyarrow as pa from pandas import ( NA, + ArrowDtype, Categorical, DataFrame, Index, @@ -1098,4 +1100,66 @@ def time_resample_multiindex(self): ).mean() +class GroupByAggregateArrowDtypes: + param_names = ["dtype", "method"] + params = [ + [ + ArrowDtype(pa.bool_()), + ArrowDtype(pa.decimal128(25, 3)), + ArrowDtype(pa.float64()), + ArrowDtype(pa.int32()), + ArrowDtype(pa.string()), + ArrowDtype(pa.timestamp("s", "UTC")), + ArrowDtype(pa.duration("ms")), + ], + [ + "any", + "all", + "count", + "sum", + "prod", + "min", + "max", + "mean", + "median", + "std", + ], + ] + + def setup(self, dtype, method): + size = (200_000, 10) + pa_type = dtype.pyarrow_dtype + if pa.types.is_floating(pa_type): + values = np.random.randn(*size) + elif pa.types.is_integer(pa_type): + values = np.random.randint(0, 10_000, size) + elif pa.types.is_decimal(pa_type): + values = np.random.randn(*size) + elif pa.types.is_boolean(pa_type): + values = np.random.randint(0, 2, size, dtype=np.bool_) + elif pa.types.is_timestamp(pa_type): + if method in ["any", "all", "sum", "prod"]: + raise NotImplementedError + values = np.random.randint(0, 10_000, size) + elif pa.types.is_duration(pa_type): + if method == "prod": + raise NotImplementedError + values = np.random.randint(0, 10_000, size) + elif pa.types.is_string(pa_type): + if method in ["any", "all", "sum", "prod", "mean", "median", "std"]: + raise NotImplementedError + values = tm.rands_array(nchars=10, size=size) + else: + raise NotImplementedError + + columns = list("abcdefghij") + df = DataFrame(values, columns=columns, dtype=dtype) + df.iloc[::10, ::2] = NA + df["key"] = np.random.randint(0, high=100, size=(len(values))) + self.df = df + + def time_frame_agg(self, dtype, method): + self.df.groupby("key").agg(method) + + from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 0760840f9950a..9a00b18af3126 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -329,7 +329,7 @@ Plotting Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- +- Bug in :meth:`.SeriesGroupBy.var` and :meth:`.DataFrameGroupBy.var` where the dtype would be ``np.float64`` for data with :class:`ArrowDtype` with ``pyarrow.decimal128`` type (:issue:`54627`) - Reshaping diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 4b79d0dbb683e..785215ed95591 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -1466,44 +1466,16 @@ def _reduce_pyarrow(self, name: str, *, skipna: bool = True, **kwargs) -> pa.Sca ------ TypeError : subclass does not define reductions """ - pa_type = self._pa_array.type - - data_to_reduce = self._pa_array - - cast_kwargs = {} if pa_version_under13p0 else {"safe": False} - - if name in ["any", "all"] and ( - pa.types.is_integer(pa_type) - or pa.types.is_floating(pa_type) - or pa.types.is_duration(pa_type) - or pa.types.is_decimal(pa_type) - ): - # pyarrow only supports any/all for boolean dtype, we allow - # for other dtypes, matching our non-pyarrow behavior - - if pa.types.is_duration(pa_type): - data_to_cmp = self._pa_array.cast(pa.int64()) - else: - data_to_cmp = self._pa_array - - not_eq = pc.not_equal(data_to_cmp, 0) 
- data_to_reduce = not_eq - - elif name in ["min", "max", "sum"] and pa.types.is_duration(pa_type): - data_to_reduce = self._pa_array.cast(pa.int64()) - - elif name in ["median", "mean", "std", "sem"] and pa.types.is_temporal(pa_type): - nbits = pa_type.bit_width - if nbits == 32: - data_to_reduce = self._pa_array.cast(pa.int32()) - else: - data_to_reduce = self._pa_array.cast(pa.int64()) + data_to_reduce = self._values_for_reduction(name) if name == "sem": def pyarrow_meth(data, skip_nulls, **kwargs): numerator = pc.stddev(data, skip_nulls=skip_nulls, **kwargs) - denominator = pc.sqrt_checked(pc.count(self._pa_array)) + if pa_version_under8p0: + denominator = pc.power_checked(pc.count(self._pa_array), 0.5) + else: + denominator = pc.sqrt_checked(pc.count(self._pa_array)) return pc.divide_checked(numerator, denominator) else: @@ -1541,21 +1513,94 @@ def pyarrow_meth(data, skip_nulls, **kwargs): if name == "median": # GH 52679: Use quantile instead of approximate_median; returns array result = result[0] - if pc.is_null(result).as_py(): - return result + return self._maybe_cast_reduction_result(result, name) + + def _values_for_reduction(self, name: str) -> pa.ChunkedArray: + """ + Return the underlying ChunkedArray, possibly cast to a different type in + order to support reductions. + + Parameters + ---------- + name : str + Name of the function, supported values are: + { any, all, min, max, sum, mean, median, prod, + std, var, sem, kurt, skew }. + + Returns + ------- + pa.ChunkedArray + """ + pa_type = self._pa_array.type + + if name in ["any", "all"] and ( + pa.types.is_integer(pa_type) + or pa.types.is_floating(pa_type) + or pa.types.is_duration(pa_type) + or pa.types.is_decimal(pa_type) + ): + # pyarrow only supports any/all for boolean dtype, we allow + # for other dtypes, matching our non-pyarrow behavior + if pa.types.is_duration(pa_type): + data_to_cmp = self._pa_array.cast(pa.int64()) + else: + data_to_cmp = self._pa_array + + not_eq = pc.not_equal(data_to_cmp, 0) + data_to_reduce = not_eq + + elif name in ["min", "max", "sum"] and pa.types.is_duration(pa_type): + data_to_reduce = self._pa_array.cast(pa.int64()) + + elif name in ["median", "mean", "std", "sem"] and pa.types.is_temporal(pa_type): + nbits = pa_type.bit_width + if nbits == 32: + data_to_reduce = self._pa_array.cast(pa.int32()) + else: + data_to_reduce = self._pa_array.cast(pa.int64()) + + elif name in ["sem", "std", "var"] and pa.types.is_boolean(pa_type): + data_to_reduce = self._pa_array.cast(pa.uint8()) + + else: + data_to_reduce = self._pa_array + + return data_to_reduce + + def _maybe_cast_reduction_result( + self, result: pa.Scalar | pa.ChunkedArray, name: str + ): + """ + Maybe cast a reduction result to a different pyarrow type. + + See ArrowExtensionArray._values_for_reduction. + + Parameters + ---------- + result : pa.Scalar | pa.ChunkedArray + name : str + Name of the function, supported values are: + { any, all, min, max, sum, mean, median, prod, + std, var, sem, kurt, skew }. 
+ + Returns + ------- + pa.Scalar or pa.ChunkedArray + """ + pa_type = self._pa_array.type if name in ["min", "max", "sum"] and pa.types.is_duration(pa_type): result = result.cast(pa_type) if name in ["median", "mean"] and pa.types.is_temporal(pa_type): if not pa_version_under13p0: nbits = pa_type.bit_width if nbits == 32: - result = result.cast(pa.int32(), **cast_kwargs) + result = pc.cast(result, pa.int32(), safe=False) else: - result = result.cast(pa.int64(), **cast_kwargs) + result = pc.cast(result, pa.int64(), safe=False) result = result.cast(pa_type) if name in ["std", "sem"] and pa.types.is_temporal(pa_type): - result = result.cast(pa.int64(), **cast_kwargs) + result = pc.cast(result, pa.int64(), safe=False) if pa.types.is_duration(pa_type): result = result.cast(pa_type) elif pa.types.is_time(pa_type): @@ -1999,27 +2044,110 @@ def _groupby_op( **kwargs, ) - # maybe convert to a compatible dtype optimized for groupby - values: ExtensionArray pa_type = self._pa_array.type - if pa.types.is_timestamp(pa_type): - values = self._to_datetimearray() - elif pa.types.is_duration(pa_type): - values = self._to_timedeltaarray() + pa_supported_groupby_aggregations = [ + "any", + "all", + "sum", + "prod", + "min", + "max", + "mean", + "sem", + "std", + "var", + ] + # aggregations not yet supported natively in pyarrow: + # - first/last: https://github.com/apache/arrow/issues/36709 + # - nth + # - median + + if how not in pa_supported_groupby_aggregations: + # maybe convert to a compatible dtype for groupby + values: ExtensionArray + pa_type = self._pa_array.type + if pa.types.is_timestamp(pa_type): + values = self._to_datetimearray() + elif pa.types.is_duration(pa_type): + values = self._to_timedeltaarray() + else: + values = self._to_masked() + + result = values._groupby_op( + how=how, + has_dropped_na=has_dropped_na, + min_count=min_count, + ngroups=ngroups, + ids=ids, + **kwargs, + ) + if isinstance(result, np.ndarray): + return result + return type(self)._from_sequence(result, copy=False) + + arr = self._values_for_reduction(how) + + pa_name = { + "prod": "product", + "std": "stddev", + "var": "variance", + }.get(how, how) + + # pyarrow does not accept a min_count of -1 + if min_count == -1: + if how in ["any", "all"]: + min_count = 0 + else: + min_count = 1 + + if how in ["std", "var"]: + aggregations = [ + ( + "val", + pa_name, + pc.VarianceOptions(ddof=kwargs["ddof"], min_count=min_count), + ) + ] + elif how == "sem": + aggregations = [ + ( + "val", + "stddev", + pc.VarianceOptions(ddof=kwargs["ddof"], min_count=min_count), + ), + ( + "val", + "count", + pc.CountOptions(mode="only_valid"), + ), + ] else: - values = self._to_masked() - - result = values._groupby_op( - how=how, - has_dropped_na=has_dropped_na, - min_count=min_count, - ngroups=ngroups, - ids=ids, - **kwargs, + aggregations = [ + ( + "val", + pa_name, + pc.ScalarAggregateOptions(min_count=min_count), + ) + ] + + res = ( + pa.Table.from_pydict({"id": ids, "val": arr}) + .group_by("id") + .aggregate(aggregations) ) - if isinstance(result, np.ndarray): - return result - return type(self)._from_sequence(result, copy=False) + + if how == "sem": + numerator = res["val_stddev"] + if pa_version_under8p0: + denominator = pc.power_checked(res["val_count"], 0.5) + else: + denominator = pc.sqrt_checked(res["val_count"]) + result = pc.divide_checked(numerator, denominator) + else: + result = res[f"val_{pa_name}"] + + result = self._maybe_cast_reduction_result(result, how) + return type(self)(result) def _apply_elementwise(self, func: 
Callable) -> list[list[Any]]: """Apply a callable to each element while maintaining the chunking structure.""" diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 933944dbd4632..a01b0ed420664 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -2248,7 +2248,7 @@ def _groupby_op( Parameters ---------- how : {'any', 'all', 'sum', 'prod', 'min', 'max', 'mean', 'median', - 'median', 'var', 'std', 'sem', 'nth', 'last', 'ohlc', + 'var', 'std', 'sem', 'nth', 'first', 'last', 'ohlc', 'cumprod', 'cumsum', 'cummin', 'cummax', 'rank'} has_dropped_na : bool min_count : int diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 339e97e735f85..0e18728c19df7 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -503,19 +503,7 @@ def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, reque and pa.types.is_decimal(pa_dtype) ): request.node.add_marker(xfail_mark) - elif ( - all_numeric_reductions == "sem" - and pa_version_under8p0 - and (dtype._is_numeric or pa.types.is_temporal(pa_dtype)) - ): - request.node.add_marker(xfail_mark) - - elif pa.types.is_boolean(pa_dtype) and all_numeric_reductions in { - "sem", - "std", - "var", - "median", - }: + elif all_numeric_reductions == "median" and pa.types.is_boolean(pa_dtype): request.node.add_marker(xfail_mark) super().test_reduce_series_numeric(data, all_numeric_reductions, skipna) @@ -3046,3 +3034,66 @@ def test_factorize_chunked_dictionary(): exp_uniques = pd.Index(ArrowExtensionArray(pa_array.combine_chunks())) tm.assert_numpy_array_equal(res_indices, exp_indicies) tm.assert_index_equal(res_uniques, exp_uniques) + + +@pytest.mark.parametrize( + "how", ["any", "all", "sum", "prod", "min", "max", "mean", "sem", "std", "var"] +) +def test_groupby_reductions(data, how, request): + # GH 55234 + pa_type = data._pa_array.type + + pyarrow_err_msg = (pa.ArrowNotImplementedError, "no kernel matching input types") + fallback_err_msg = (TypeError, "agg function failed") + err, msg = None, None + + if pa.types.is_string(pa_type) or pa.types.is_binary(pa_type): + if how in ["any", "all", "sem", "std"]: + err, msg = pyarrow_err_msg + elif how in ["sum", "prod", "mean", "var"]: + err, msg = fallback_err_msg + elif pa.types.is_duration(pa_type): + if how in ["prod", "var"]: + err, msg = fallback_err_msg + elif pa.types.is_temporal(pa_type): + if how in ["any", "all"]: + err, msg = pyarrow_err_msg + elif how in ["sum", "prod", "var"]: + err, msg = fallback_err_msg + + null_index = 8 + assert pd.notnull(data[0]) + assert pd.notnull(data[1]) + assert pd.isnull(data[null_index]) + + group_locs = [ + [0, 1, 1], + [0, 1, null_index], + [null_index, 0], + [null_index], + ] + group_lengths = [len(group) for group in group_locs] + groups = np.array(["B", "C", "A", "D"]).repeat(group_lengths) + values = data.take([loc for arr in group_locs for loc in arr]) + df = pd.DataFrame({"key": groups, "val": values}) + gby = df.groupby("key", sort=False)["val"] + + if err is not None: + with pytest.raises(err, match=msg): + result = gby.aggregate(how) + return + else: + result = gby.aggregate(how) + + expected_type = data.take(group_locs[0])._reduce_pyarrow(how).type + expected_arr = pa.array( + [pd.Series(data.take(locs)).agg(how) for locs in group_locs], + type=expected_type, + from_pandas=True, + ) + expected = pd.Series( + ArrowExtensionArray(expected_arr), + index=pd.Index(["B", "C", "A", "D"], name="key"), + name="val", + ) + 
tm.assert_series_equal(result, expected)
- [x] closes #54627 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v2.2.0.rst` file if fixing a bug or adding a new feature. Use pyarrow's `TableGroupBy` functionality for pyarrow-backed groupby aggregations. This has a few benefits over the current approach of round-tripping through non-arrow dtypes: 1. proper support for groupby operations with pyarrow decimal types 2. removes values-dependent behavior (e.g. https://github.com/pandas-dev/pandas/issues/54627#issuecomment-1684160350) 3. aligns grouped aggregation behavior with non-grouped aggregation behavior (the existing implementation has a number of subtle differences) Performance impact is mixed. I think the consistency benefits mentioned above outweigh the performance impact here. If others disagree, we could continue using the pandas implementation for specific aggregations. Note: These timings are from a slow laptop. It would be great if someone could run this ASV to confirm (@rhshadrach possibly?) `asv continuous -f 1.1 upstream/main arrow-groupby -b groupby.GroupByAggregateArrowDtypes`
```
       before       after   ratio
+  32.3±0.9ms     54.8±3ms   1.70  groupby.GroupByAggregateArrowDtypes.time_frame_agg(int32[pyarrow], 'all')
+    35.3±1ms     51.4±2ms   1.46  groupby.GroupByAggregateArrowDtypes.time_frame_agg(duration[ms][pyarrow], 'any')
+    36.1±1ms   50.2±0.9ms   1.39  groupby.GroupByAggregateArrowDtypes.time_frame_agg(duration[ms][pyarrow], 'all')
+    39.8±6ms     54.6±2ms   1.37  groupby.GroupByAggregateArrowDtypes.time_frame_agg(int32[pyarrow], 'any')
+    42.6±2ms     57.1±3ms   1.34  groupby.GroupByAggregateArrowDtypes.time_frame_agg(bool[pyarrow], 'min')
+  42.5±0.9ms     54.2±6ms   1.28  groupby.GroupByAggregateArrowDtypes.time_frame_agg(timestamp[s, tz=UTC][pyarrow], 'min')
+    40.7±3ms     51.4±3ms   1.26  groupby.GroupByAggregateArrowDtypes.time_frame_agg(duration[ms][pyarrow], 'max')
+    40.9±2ms     50.8±4ms   1.24  groupby.GroupByAggregateArrowDtypes.time_frame_agg(timestamp[s, tz=UTC][pyarrow], 'max')
+    46.2±3ms     56.4±2ms   1.22  groupby.GroupByAggregateArrowDtypes.time_frame_agg(bool[pyarrow], 'std')
+    41.4±2ms     49.3±1ms   1.19  groupby.GroupByAggregateArrowDtypes.time_frame_agg(int32[pyarrow], 'std')
+    43.3±2ms     50.7±1ms   1.17  groupby.GroupByAggregateArrowDtypes.time_frame_agg(duration[ms][pyarrow], 'min')
-    427±80ms      128±4ms   0.30  groupby.GroupByAggregateArrowDtypes.time_frame_agg(decimal128(25, 3)[pyarrow], 'prod')
-    395±40ms     91.3±6ms   0.23  groupby.GroupByAggregateArrowDtypes.time_frame_agg(string[pyarrow], 'max')
-     401±9ms     89.3±3ms   0.22  groupby.GroupByAggregateArrowDtypes.time_frame_agg(string[pyarrow], 'min')
-    429±50ms     89.0±8ms   0.21  groupby.GroupByAggregateArrowDtypes.time_frame_agg(decimal128(25, 3)[pyarrow], 'max')
-    449±80ms     89.1±2ms   0.20  groupby.GroupByAggregateArrowDtypes.time_frame_agg(decimal128(25, 3)[pyarrow], 'min')
-    384±60ms     56.1±2ms   0.15  groupby.GroupByAggregateArrowDtypes.time_frame_agg(decimal128(25, 3)[pyarrow], 'sum')
-    442±80ms     56.6±2ms   0.13  groupby.GroupByAggregateArrowDtypes.time_frame_agg(decimal128(25, 3)[pyarrow], 'mean')
```
Longer term, there are potentially big performance gains if all arrow-backed aggregations can be passed to pyarrow at once. At the moment, we're iterating column by column. Here is a small example showing the impact:
```python
import pandas as pd
import numpy as np
import pyarrow as pa

N = 100_000
columns = list("abcdefghij")
data = np.random.randn(N, len(columns))
df = pd.DataFrame(data, columns=columns, dtype="float64[pyarrow]")
df["key"] = np.random.randint(0, 100, N)
table = pa.table(df)

# column-by-column:
%timeit [table.group_by("key").aggregate([(col, "sum")]) for col in columns]
# all at once:
%timeit table.group_by("key").aggregate([(col, "sum") for col in columns])

# 24.7 ms ± 1.17 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)  <- column-by-column
# 4.54 ms ± 152 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)  <- all at once
```
https://api.github.com/repos/pandas-dev/pandas/pulls/55234
2023-09-22T01:04:09Z
2023-10-01T18:54:24Z
null
2023-11-16T12:56:52Z
Typo: 'whenlines' -> 'when lines'
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index ecab14a54beff..d596891197aaa 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -175,7 +175,7 @@ def to_json(
 
     if mode == "a" and (not lines or orient != "records"):
         msg = (
-            "mode='a' (append) is only supported when"
+            "mode='a' (append) is only supported when "
             "lines is True and orient is 'records'"
         )
         raise ValueError(msg)
diff --git a/pandas/tests/io/json/test_readlines.py b/pandas/tests/io/json/test_readlines.py
index c2f915e33df8a..d7baba87bba31 100644
--- a/pandas/tests/io/json/test_readlines.py
+++ b/pandas/tests/io/json/test_readlines.py
@@ -399,7 +399,7 @@ def test_to_json_append_orient(orient_):
     # Test ValueError when orient is not 'records'
     df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
     msg = (
-        r"mode='a' \(append\) is only supported when"
+        r"mode='a' \(append\) is only supported when "
         "lines is True and orient is 'records'"
     )
     with pytest.raises(ValueError, match=msg):
@@ -411,7 +411,7 @@ def test_to_json_append_lines():
     # Test ValueError when lines is not True
     df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
     msg = (
-        r"mode='a' \(append\) is only supported when"
+        r"mode='a' \(append\) is only supported when "
         "lines is True and orient is 'records'"
     )
    with pytest.raises(ValueError, match=msg):
I noticed that the text gets joined together when this particular error message gets printed. It looks like `whenlines` instead of `when lines`.
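The root cause is Python's implicit concatenation of adjacent string literals; a quick demonstration:

```python
# Without a trailing space, adjacent string literals fuse the two words:
msg = (
    "mode='a' (append) is only supported when"
    "lines is True and orient is 'records'"
)
print(msg)
# mode='a' (append) is only supported whenlines is True and orient is 'records'
```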
https://api.github.com/repos/pandas-dev/pandas/pulls/55233
2023-09-21T20:09:46Z
2023-09-22T00:10:28Z
2023-09-22T00:10:28Z
2023-09-22T00:10:35Z
BUG: Interchange object data buffer has the wrong dtype / from_dataframe incorrect
diff --git a/pandas/core/interchange/from_dataframe.py b/pandas/core/interchange/from_dataframe.py index 214fbf9f36435..d45ae37890ba7 100644 --- a/pandas/core/interchange/from_dataframe.py +++ b/pandas/core/interchange/from_dataframe.py @@ -266,10 +266,9 @@ def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: assert buffers["offsets"], "String buffers must contain offsets" # Retrieve the data buffer containing the UTF-8 code units - data_buff, protocol_data_dtype = buffers["data"] + data_buff, _ = buffers["data"] # We're going to reinterpret the buffer as uint8, so make sure we can do it safely - assert protocol_data_dtype[1] == 8 - assert protocol_data_dtype[2] in ( + assert col.dtype[2] in ( ArrowCTypes.STRING, ArrowCTypes.LARGE_STRING, ) # format_str == utf-8 @@ -377,15 +376,16 @@ def datetime_column_to_ndarray(col: Column) -> tuple[np.ndarray | pd.Series, Any """ buffers = col.get_buffers() - _, _, format_str, _ = col.dtype - dbuf, dtype = buffers["data"] + _, col_bit_width, format_str, _ = col.dtype + dbuf, _ = buffers["data"] # Consider dtype being `uint` to get number of units passed since the 01.01.1970 + data = buffer_to_ndarray( dbuf, ( - DtypeKind.UINT, - dtype[1], - getattr(ArrowCTypes, f"UINT{dtype[1]}"), + DtypeKind.INT, + col_bit_width, + getattr(ArrowCTypes, f"INT{col_bit_width}"), Endianness.NATIVE, ), offset=col.offset, diff --git a/pandas/tests/interchange/test_impl.py b/pandas/tests/interchange/test_impl.py index 8a25a2c1889f3..db163397ade67 100644 --- a/pandas/tests/interchange/test_impl.py +++ b/pandas/tests/interchange/test_impl.py @@ -14,6 +14,7 @@ DtypeKind, ) from pandas.core.interchange.from_dataframe import from_dataframe +from pandas.core.interchange.utils import ArrowCTypes @pytest.fixture @@ -326,3 +327,24 @@ def test_interchange_from_non_pandas_tz_aware(): dtype="datetime64[us, Asia/Kathmandu]", ) tm.assert_frame_equal(expected, result) + + +def test_interchange_from_corrected_buffer_dtypes(monkeypatch) -> None: + # https://github.com/pandas-dev/pandas/issues/54781 + df = pd.DataFrame({"a": ["foo", "bar"]}).__dataframe__() + interchange = df.__dataframe__() + column = interchange.get_column_by_name("a") + buffers = column.get_buffers() + buffers_data = buffers["data"] + buffer_dtype = buffers_data[1] + buffer_dtype = ( + DtypeKind.UINT, + 8, + ArrowCTypes.UINT8, + buffer_dtype[3], + ) + buffers["data"] = (buffers_data[0], buffer_dtype) + column.get_buffers = lambda: buffers + interchange.get_column_by_name = lambda _: column + monkeypatch.setattr(df, "__dataframe__", lambda allow_copy: interchange) + pd.api.interchange.from_dataframe(df)
- [ ] closes #54781
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

I haven't included a whatsnew note, as this isn't user-facing.
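A minimal round-trip that exercises the fixed path, distilled from the regression test above:

```python
import pandas as pd

df = pd.DataFrame({"a": ["foo", "bar"]})
# The string path now validates against the column dtype instead of the
# buffer's self-reported dtype, and datetime buffers are read as signed
# integers, so producers that report e.g. uint8 data buffers still work.
result = pd.api.interchange.from_dataframe(df.__dataframe__())
print(result)
```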
https://api.github.com/repos/pandas-dev/pandas/pulls/55227
2023-09-21T11:25:18Z
2023-11-07T12:06:56Z
2023-11-07T12:06:56Z
2023-11-07T12:06:56Z
Backport PR #55206 on branch 2.1.x (BUILD: Fix duplicate files warning)
diff --git a/pandas/_libs/meson.build b/pandas/_libs/meson.build index 1cf2c4343d844..fd632790546f6 100644 --- a/pandas/_libs/meson.build +++ b/pandas/_libs/meson.build @@ -114,9 +114,40 @@ foreach ext_name, ext_dict : libs_sources ) endforeach -py.install_sources( +# Basically just __init__.py and the .pyi files +sources_to_install = [ '__init__.py', - subdir: 'pandas/_libs' -) + 'algos.pyi', + 'arrays.pyi', + 'byteswap.pyi', + 'groupby.pyi', + 'hashing.pyi', + 'hashtable.pyi', + 'index.pyi', + 'indexing.pyi', + 'internals.pyi', + 'interval.pyi', + 'join.pyi', + 'json.pyi', + 'lib.pyi', + 'missing.pyi', + 'ops.pyi', + 'ops_dispatch.pyi', + 'parsers.pyi', + 'properties.pyi', + 'reshape.pyi', + 'sas.pyi', + 'sparse.pyi', + 'testing.pyi', + 'tslib.pyi', + 'writers.pyi' +] + +foreach source: sources_to_install + py.install_sources( + source, + subdir: 'pandas/_libs' + ) +endforeach subdir('window') diff --git a/pandas/_libs/tslibs/meson.build b/pandas/_libs/tslibs/meson.build index 167695b84514c..a1b0c54d1f48c 100644 --- a/pandas/_libs/tslibs/meson.build +++ b/pandas/_libs/tslibs/meson.build @@ -31,7 +31,28 @@ foreach ext_name, ext_dict : tslibs_sources ) endforeach -py.install_sources( +sources_to_install = [ '__init__.py', - subdir: 'pandas/_libs/tslibs' -) + 'ccalendar.pyi', + 'conversion.pyi', + 'dtypes.pyi', + 'fields.pyi', + 'nattype.pyi', + 'np_datetime.pyi', + 'offsets.pyi', + 'parsing.pyi', + 'period.pyi', + 'strptime.pyi', + 'timedeltas.pyi', + 'timestamps.pyi', + 'timezones.pyi', + 'tzconversion.pyi', + 'vectorized.pyi' +] + +foreach source: sources_to_install + py.install_sources( + source, + subdir: 'pandas/_libs/tslibs' + ) +endforeach diff --git a/pandas/_libs/window/meson.build b/pandas/_libs/window/meson.build index 85aa060a26406..ad15644f73a0c 100644 --- a/pandas/_libs/window/meson.build +++ b/pandas/_libs/window/meson.build @@ -16,3 +16,16 @@ py.extension_module( subdir: 'pandas/_libs/window', install: true ) + +sources_to_install = [ + '__init__.py', + 'aggregations.pyi', + 'indexers.pyi' +] + +foreach source: sources_to_install + py.install_sources( + source, + subdir: 'pandas/_libs/window' + ) +endforeach diff --git a/pandas/meson.build b/pandas/meson.build index f02258c98d46a..435103a954d86 100644 --- a/pandas/meson.build +++ b/pandas/meson.build @@ -26,7 +26,6 @@ subdir('_libs') subdirs_list = [ '_config', - '_libs', '_testing', 'api', 'arrays',
Backport PR #55206: BUILD: Fix duplicate files warning
https://api.github.com/repos/pandas-dev/pandas/pulls/55222
2023-09-20T17:00:07Z
2023-09-20T19:16:58Z
2023-09-20T19:16:58Z
2023-09-20T19:16:58Z
Backport PR #55207 on branch 2.1.x (DOC: Add whatsnew for 2.1.2)
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index c21fbc28ae68f..baab20845a2a2 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -16,6 +16,7 @@ Version 2.1 .. toctree:: :maxdepth: 2 + v2.1.2 v2.1.1 v2.1.0 diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index 040ca048d1224..21eef4921ebc0 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -892,4 +892,4 @@ Other Contributors ~~~~~~~~~~~~ -.. contributors:: v2.0.3..v2.1.0|HEAD +.. contributors:: v2.0.3..v2.1.0 diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst index bb3fbd4ffc90f..5a1a1516dc30d 100644 --- a/doc/source/whatsnew/v2.1.1.rst +++ b/doc/source/whatsnew/v2.1.1.rst @@ -51,3 +51,5 @@ Other Contributors ~~~~~~~~~~~~ + +.. contributors:: v2.1.0..v2.1.1|HEAD diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst new file mode 100644 index 0000000000000..2c9b10160d144 --- /dev/null +++ b/doc/source/whatsnew/v2.1.2.rst @@ -0,0 +1,39 @@ +.. _whatsnew_212: + +What's new in 2.1.2 (October ??, 2023) +--------------------------------------- + +These are the changes in pandas 2.1.2. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- +.. _whatsnew_212.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ +- +- + +.. --------------------------------------------------------------------------- +.. _whatsnew_212.bug_fixes: + +Bug fixes +~~~~~~~~~ +- +- + +.. --------------------------------------------------------------------------- +.. _whatsnew_212.other: + +Other +~~~~~ +- +- + +.. --------------------------------------------------------------------------- +.. _whatsnew_212.contributors: + +Contributors +~~~~~~~~~~~~
Backport PR #55207: DOC: Add whatsnew for 2.1.2
https://api.github.com/repos/pandas-dev/pandas/pulls/55221
2023-09-20T13:52:44Z
2023-09-20T16:27:10Z
2023-09-20T16:27:10Z
2023-09-20T16:27:10Z
Backport PR #55008 on branch 2.1.x (CoW: Clear dead references every time we add a new one)
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index adf4e8c926fa3..859ebce36eaa8 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -951,6 +951,11 @@ cdef class BlockValuesRefs:
         else:
             self.referenced_blocks = []
 
+    def _clear_dead_references(self) -> None:
+        self.referenced_blocks = [
+            ref for ref in self.referenced_blocks if ref() is not None
+        ]
+
     def add_reference(self, blk: SharedBlock) -> None:
         """Adds a new reference to our reference collection.
 
@@ -959,6 +964,7 @@ cdef class BlockValuesRefs:
         blk: SharedBlock
             The block that the new references should point to.
         """
+        self._clear_dead_references()
         self.referenced_blocks.append(weakref.ref(blk))
 
     def add_index_reference(self, index: object) -> None:
@@ -969,6 +975,7 @@ cdef class BlockValuesRefs:
         index : Index
             The index that the new reference should point to.
         """
+        self._clear_dead_references()
         self.referenced_blocks.append(weakref.ref(index))
 
     def has_reference(self) -> bool:
@@ -981,8 +988,6 @@ cdef class BlockValuesRefs:
         -------
         bool
         """
-        self.referenced_blocks = [
-            ref for ref in self.referenced_blocks if ref() is not None
-        ]
+        self._clear_dead_references()
         # Checking for more references than block pointing to itself
         return len(self.referenced_blocks) > 1
#55008
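For reviewers unfamiliar with the original PR: in plain Python, the change amounts to pruning dead weakrefs every time a reference is added, not only when `has_reference()` is called. A rough sketch of the pattern (the class name here is hypothetical; the real implementation is the `BlockValuesRefs` Cython class in the diff above):

```python
import weakref

class RefTracker:
    """Sketch of the Copy-on-Write reference-tracking pattern."""

    def __init__(self):
        self.referenced_blocks = []

    def _clear_dead_references(self):
        # Drop weakrefs whose referents were garbage collected, so the
        # list no longer grows unboundedly between has_reference() calls.
        self.referenced_blocks = [
            ref for ref in self.referenced_blocks if ref() is not None
        ]

    def add_reference(self, blk):
        self._clear_dead_references()
        self.referenced_blocks.append(weakref.ref(blk))

    def has_reference(self):
        self._clear_dead_references()
        # More than one live reference means the values are shared.
        return len(self.referenced_blocks) > 1
```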
https://api.github.com/repos/pandas-dev/pandas/pulls/55220
2023-09-20T13:44:15Z
2023-09-20T15:54:24Z
2023-09-20T15:54:24Z
2023-09-30T21:24:28Z
BUG: manage raw ods file without cell cache
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst
index da2e30edc80ea..b97198c36891c 100644
--- a/doc/source/whatsnew/v2.2.0.rst
+++ b/doc/source/whatsnew/v2.2.0.rst
@@ -223,6 +223,7 @@ Bug fixes
 - Bug in :class:`AbstractHolidayCalendar` where timezone data was not propagated when computing holiday observances (:issue:`54580`)
 - Bug in :class:`pandas.core.window.Rolling` where duplicate datetimelike indexes are treated as consecutive rather than equal with ``closed='left'`` and ``closed='neither'`` (:issue:`20712`)
 - Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`)
+- Bug in :meth:`pandas.read_excel` with a ODS file without cached formatted cell for float values (:issue:`55219`)
 
 Categorical
 ^^^^^^^^^^^
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 8016dbbaf7f42..277f64f636731 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -150,7 +150,7 @@ def get_sheet_data(
                 max_row_len = len(table_row)
 
             row_repeat = self._get_row_repeat(sheet_row)
-            if self._is_empty_row(sheet_row):
+            if len(table_row) == 0:
                 empty_rows += row_repeat
             else:
                 # add blank rows to our table
@@ -182,16 +182,6 @@ def _get_column_repeat(self, cell) -> int:
 
         return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))
 
-    def _is_empty_row(self, row) -> bool:
-        """
-        Helper function to find empty rows
-        """
-        for column in row.childNodes:
-            if len(column.childNodes) > 0:
-                return False
-
-        return True
-
     def _get_cell_value(self, cell) -> Scalar | NaTType:
         from odf.namespaces import OFFICENS
 
diff --git a/pandas/tests/io/data/excel/test_unempty_cells.ods b/pandas/tests/io/data/excel/test_unempty_cells.ods
new file mode 100644
index 0000000000000..52458753bae06
Binary files /dev/null and b/pandas/tests/io/data/excel/test_unempty_cells.ods differ
diff --git a/pandas/tests/io/excel/test_odf.py b/pandas/tests/io/excel/test_odf.py
index 25079b235d332..9d49718bec357 100644
--- a/pandas/tests/io/excel/test_odf.py
+++ b/pandas/tests/io/excel/test_odf.py
@@ -48,3 +48,14 @@ def test_read_newlines_between_xml_elements_table():
     result = pd.read_excel("test_newlines.ods")
 
     tm.assert_frame_equal(result, expected)
+
+
+def test_read_unempty_cells():
+    expected = pd.DataFrame(
+        [1, np.nan, 3, np.nan, 5],
+        columns=["Column 1"],
+    )
+
+    result = pd.read_excel("test_unempty_cells.ods")
+
+    tm.assert_frame_equal(result, expected)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] ~~Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.~~
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

```python
import ezodf

ods = ezodf.newdoc("ods", "test_unempty_cells.ods")
sheet = ezodf.Sheet("base", size=(6, 1))
ods.sheets += sheet

sheet[0, 0].set_value("Column 1")
for i in range(1, 6, 2):
    sheet[i, 0].set_value(i)
    # sheet[i, 0].append(ezodf.Paragraph(str(i)))
ods.save()
```

The script above should generate

|    |   Column 1 |
|---:|-----------:|
|  0 |          1 |
|  1 |        nan |
|  2 |          3 |
|  3 |        nan |
|  4 |          5 |

but it generates an empty DataFrame instead:

| Column 1 |
|----------|

A hacky workaround when building the ODS file is commented out in the snippet above: `sheet[i, 0].append(ezodf.Paragraph(str(i)))` forces the formatted text to be written for each cell, so the rows are no longer (wrongly) detected as empty. ODS files keep a cached, formatted copy of each cell's value alongside the raw value; the data itself is read correctly from the raw values, but the logic that decides whether a whole row is empty mistakenly relied on that cache. This PR removes that logic and fixes the bug.
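With the fix in place, reading the file produced by the snippet above matches the expected frame; a check mirroring the new `test_read_unempty_cells` test:

```python
import numpy as np
import pandas as pd

# Previously this returned an empty DataFrame because every row of the
# cache-less file was classified as empty.
result = pd.read_excel("test_unempty_cells.ods")
expected = pd.DataFrame([1, np.nan, 3, np.nan, 5], columns=["Column 1"])
pd.testing.assert_frame_equal(result, expected)
```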
https://api.github.com/repos/pandas-dev/pandas/pulls/55219
2023-09-20T13:40:21Z
2023-09-20T16:57:32Z
2023-09-20T16:57:32Z
2023-09-20T16:57:40Z
Backport PR #55211 on branch 2.1.x (DOC: Add release date for 2.1.1)
diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst
index 6d5da7cdff3b3..060235f0d2bff 100644
--- a/doc/source/whatsnew/v2.1.1.rst
+++ b/doc/source/whatsnew/v2.1.1.rst
@@ -1,6 +1,6 @@
 .. _whatsnew_211:
 
-What's new in 2.1.1 (September XX, 2023)
+What's new in 2.1.1 (September 20, 2023)
 ----------------------------------------
 
 These are the changes in pandas 2.1.1. See :ref:`release` for a full changelog
Backport PR #55211: DOC: Add release date for 2.1.1
https://api.github.com/repos/pandas-dev/pandas/pulls/55218
2023-09-20T11:57:26Z
2023-09-20T13:38:34Z
2023-09-20T13:38:34Z
2023-09-20T13:38:34Z
Backport PR #55050 on branch 2.1.x (Fix docstring of Index.join in base.py)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index aa306da017d34..b4ef5380d2b30 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4586,7 +4586,7 @@ def join(
         -------
         join_index, (left_indexer, right_indexer)
 
-        Examples 
+        Examples
         --------
         >>> idx1 = pd.Index([1, 2, 3])
         >>> idx2 = pd.Index([4, 5, 6])
Backport PR #55050: Fix docstring of Index.join in base.py
https://api.github.com/repos/pandas-dev/pandas/pulls/55217
2023-09-20T11:00:22Z
2023-09-20T15:52:47Z
2023-09-20T15:52:47Z
2023-09-20T15:52:47Z
Backport PR #55210 on branch 2.1.x (CI: Pin matplotlib < 3.8)
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml index edd48604dbfda..5db145e31ebc5 100644 --- a/ci/deps/actions-310.yaml +++ b/ci/deps/actions-310.yaml @@ -34,7 +34,7 @@ dependencies: - gcsfs>=2022.05.0 - jinja2>=3.1.2 - lxml>=4.8.0 - - matplotlib>=3.6.1 + - matplotlib>=3.6.1, <3.8 - numba>=0.55.2 - numexpr>=2.8.0 - odfpy>=1.4.1 diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml index 2fb6072314b1e..2d50056098a0f 100644 --- a/ci/deps/actions-311-downstream_compat.yaml +++ b/ci/deps/actions-311-downstream_compat.yaml @@ -35,7 +35,7 @@ dependencies: - gcsfs>=2022.05.0 - jinja2>=3.1.2 - lxml>=4.8.0 - - matplotlib>=3.6.1 + - matplotlib>=3.6.1, <3.8 - numba>=0.55.2 - numexpr>=2.8.0 - odfpy>=1.4.1 diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml index 029312202bcaa..d56fb9ac9fe90 100644 --- a/ci/deps/actions-311.yaml +++ b/ci/deps/actions-311.yaml @@ -34,7 +34,7 @@ dependencies: - gcsfs>=2022.05.0 - jinja2>=3.1.2 - lxml>=4.8.0 - - matplotlib>=3.6.1 + - matplotlib>=3.6.1, <3.8 - numba>=0.55.2 - numexpr>=2.8.0 - odfpy>=1.4.1 diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 12893698b5534..1843e6bd8005f 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -34,7 +34,7 @@ dependencies: - gcsfs>=2022.05.0 - jinja2>=3.1.2 - lxml>=4.8.0 - - matplotlib>=3.6.1 + - matplotlib>=3.6.1, <3.8 - numba>=0.55.2 - numexpr>=2.8.0 - odfpy>=1.4.1 diff --git a/ci/deps/circle-310-arm64.yaml b/ci/deps/circle-310-arm64.yaml index a945dbfe2ca20..c893beced8fb4 100644 --- a/ci/deps/circle-310-arm64.yaml +++ b/ci/deps/circle-310-arm64.yaml @@ -34,7 +34,7 @@ dependencies: - gcsfs>=2022.05.0 - jinja2>=3.1.2 - lxml>=4.8.0 - - matplotlib>=3.6.1 + - matplotlib>=3.6.1, <3.8 # test_numba_vs_cython segfaults with numba 0.57 - numba>=0.55.2, <0.57.0 - numexpr>=2.8.0 diff --git a/environment.yml b/environment.yml index 447abd22e29b4..6f80e9af58643 100644 --- a/environment.yml +++ b/environment.yml @@ -36,7 +36,7 @@ dependencies: - ipython - jinja2>=3.1.2 - lxml>=4.8.0 - - matplotlib>=3.6.1 + - matplotlib>=3.6.1, <3.8 - numba>=0.55.2 - numexpr>=2.8.0 - openpyxl>=3.0.10 diff --git a/requirements-dev.txt b/requirements-dev.txt index 15a8633d09039..01e537f7ea28f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -25,7 +25,7 @@ gcsfs>=2022.05.0 ipython jinja2>=3.1.2 lxml>=4.8.0 -matplotlib>=3.6.1 +matplotlib>=3.6.1, <3.8 numba>=0.55.2 numexpr>=2.8.0 openpyxl>=3.0.10
Backport PR #55210: CI: Pin matplotlib < 3.8
https://api.github.com/repos/pandas-dev/pandas/pulls/55216
2023-09-20T10:53:25Z
2023-09-20T13:07:42Z
2023-09-20T13:07:42Z
2023-09-20T13:07:43Z
BUG: support non-nanos Timedelta objects in Python C API
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index da2e30edc80ea..272ee5de5b8d4 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -236,7 +236,7 @@ Datetimelike Timedelta ^^^^^^^^^ -- +- Bug in :func:`_timedelta_from_value_and_reso` causing pytimedelta C-APIs to be incorrect (:issue:`54682`) - Timezones diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 2f6fa35cae070..dc9f35718ee2b 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -27,6 +27,9 @@ cnp.import_array() from cpython.datetime cimport ( PyDateTime_Check, + PyDateTime_DELTA_GET_DAYS, + PyDateTime_DELTA_GET_MICROSECONDS, + PyDateTime_DELTA_GET_SECONDS, PyDelta_Check, import_datetime, timedelta, @@ -958,18 +961,28 @@ cdef _timedelta_from_value_and_reso(cls, int64_t value, NPY_DATETIMEUNIT reso): _Timedelta td_base assert value != NPY_NAT - # For millisecond and second resos, we cannot actually pass int(value) because - # many cases would fall outside of the pytimedelta implementation bounds. - # We pass 0 instead, and override seconds, microseconds, days. - # In principle we could pass 0 for ns and us too. + + # In order to not break the C-API for pytimedeltas, + # we pass values when they are within bounds + # and pass zero when values are not within bounds. + # Thus pytimedelta C-API values stop working only when they + # should, while allowing Pandas Timedeltas to work for + # a broader set of values than the pytimedelta bounds. + if reso == NPY_FR_ns: td_base = _Timedelta.__new__(cls, microseconds=int(value) // 1000) elif reso == NPY_DATETIMEUNIT.NPY_FR_us: td_base = _Timedelta.__new__(cls, microseconds=int(value)) elif reso == NPY_DATETIMEUNIT.NPY_FR_ms: - td_base = _Timedelta.__new__(cls, milliseconds=0) + try: + td_base = _Timedelta.__new__(cls, milliseconds=int(value)) + except OverflowError: + td_base = _Timedelta.__new__(cls, milliseconds=0) elif reso == NPY_DATETIMEUNIT.NPY_FR_s: - td_base = _Timedelta.__new__(cls, seconds=0) + try: + td_base = _Timedelta.__new__(cls, seconds=int(value)) + except OverflowError: + td_base = _Timedelta.__new__(cls, seconds=0) # Other resolutions are disabled but could potentially be implemented here: # elif reso == NPY_DATETIMEUNIT.NPY_FR_m: # td_base = _Timedelta.__new__(Timedelta, minutes=int(value)) @@ -988,6 +1001,13 @@ cdef _timedelta_from_value_and_reso(cls, int64_t value, NPY_DATETIMEUNIT reso): return td_base +def _python_get_unit_from_dtype(dtype): + # exposing numpy unit translation for pytest purposes only + # alternative would be hardcoding resos in the test file, + # potentially breaking tests unnecessarily if these resos change + return get_unit_from_dtype(dtype) + + class MinMaxReso: """ We need to define min/max/resolution on both the Timedelta _instance_ @@ -1070,8 +1090,8 @@ cdef class _Timedelta(timedelta): >>> td.days 0 """ - # NB: using the python C-API PyDateTime_DELTA_GET_DAYS will fail - # (or be incorrect) + # NB: using the python C-API PyDateTime_DELTA_GET_DAYS may fail + # (or may be incorrect) self._ensure_components() return self._d @@ -1108,15 +1128,15 @@ cdef class _Timedelta(timedelta): >>> td.seconds 42 """ - # NB: using the python C-API PyDateTime_DELTA_GET_SECONDS will fail - # (or be incorrect) + # NB: using the python C-API PyDateTime_DELTA_GET_SECONDS may fail + # (or may be incorrect) self._ensure_components() return self._h * 3600 + self._m * 60 + self._s @property def microseconds(self) -> int: # 
TODO(cython3): make cdef property - # NB: using the python C-API PyDateTime_DELTA_GET_MICROSECONDS will fail - # (or be incorrect) + # NB: using the python C-API PyDateTime_DELTA_GET_MICROSECONDS may fail + # (or may be incorrect) self._ensure_components() return self._ms * 1000 + self._us @@ -1695,6 +1715,21 @@ cdef class _Timedelta(timedelta): self = self._as_creso(other._creso) return self, other + def _get_pytimedelta_days(self): + # exposing C-API functions for testing purposes only + # consistent behavior is NOT guaranteed + return PyDateTime_DELTA_GET_DAYS(self) + + def _get_pytimedelta_seconds(self): + # exposing C-API functions for testing purposes only + # consistent behavior is NOT guaranteed + return PyDateTime_DELTA_GET_SECONDS(self) + + def _get_pytimedelta_microseconds(self): + # exposing C-API functions for testing purposes only + # consistent behavior is NOT guaranteed + return PyDateTime_DELTA_GET_MICROSECONDS(self) + # Python front end to C extension type _Timedelta # This serves as the box for timedelta64 diff --git a/pandas/tests/tslibs/test_timedeltas.py b/pandas/tests/tslibs/test_timedeltas.py index 4784a6d0d600d..666599af23a72 100644 --- a/pandas/tests/tslibs/test_timedeltas.py +++ b/pandas/tests/tslibs/test_timedeltas.py @@ -1,9 +1,11 @@ import re import numpy as np +import pyarrow as pa import pytest from pandas._libs.tslibs.timedeltas import ( + _python_get_unit_from_dtype, array_to_timedelta64, delta_to_nanoseconds, ints_to_pytimedelta, @@ -147,3 +149,120 @@ def test_ints_to_pytimedelta_unsupported(unit): msg = "Only resolutions 's', 'ms', 'us', 'ns' are supported" with pytest.raises(NotImplementedError, match=msg): ints_to_pytimedelta(arr, box=True) + + +@pytest.mark.parametrize( + "original_unit,value,new_unit,expected_pytime_days,expected_pytime_seconds,expected_pytime_microseconds", + [ + ["days", 1, "s", 1, 0, 0], + ["seconds", 1, "s", 0, 1, 0], + ["milliseconds", 1, "ms", 0, 0, 1000], + ["microseconds", 1, "us", 0, 0, 1], + ["days", 1, "ms", 1, 0, 0], + ["days", 1, "us", 1, 0, 0], + ], +) +def test_timedelta_as_unit_conversion( + original_unit, + value, + new_unit, + expected_pytime_days, + expected_pytime_seconds, + expected_pytime_microseconds, +): + kwargs = {original_unit: value} + orig_timedelta = Timedelta(**kwargs) + + converted_timedelta = orig_timedelta.as_unit(new_unit) + + assert converted_timedelta._get_pytimedelta_days() == expected_pytime_days + assert converted_timedelta._get_pytimedelta_seconds() == expected_pytime_seconds + assert ( + converted_timedelta._get_pytimedelta_microseconds() + == expected_pytime_microseconds + ) + # finally test with pyarrow, since this was previously a bug + assert pa.scalar(converted_timedelta) == pa.scalar(orig_timedelta) + + +@pytest.mark.parametrize( + "unit,value,expected_pytime_days,expected_pytime_seconds,expected_pytime_microseconds", + [ + ["s", 1, 0, 1, 0], + ["s", 86400, 1, 0, 0], + ["s", 86401, 1, 1, 0], + ["ms", 1, 0, 0, 1000], + ["ms", 1000, 0, 1, 0], + ["ms", 86400000, 1, 0, 0], + ["ms", 86401001, 1, 1, 1000], + ["us", 1, 0, 0, 1], + ["us", 1000000, 0, 1, 0], + ["us", 86400000000, 1, 0, 0], + ["us", 86400000001, 1, 0, 1], + ["ns", 1, 0, 0, 0], + ["ns", 1000, 0, 0, 1], + # cases for just below the upper limit of pytimedelta + [ + "s", + 86399999999999, + 999999999, + 86399, + 0, + ], # (86400 s/day)*(1 billion days) - 1 s + [ + "ms", + 86399999999999999, + 999999999, + 86399, + 999000, + ], # (86400000 ms/day)*(1 billion days) - 1 ms + # cases for just above the lower limit of pytimedelta + ["s", 
-86399999913600, -999999999, 0, 0], # (86400 s/day)*(1 billion - 1 days) + [ + "ms", + -86399999913600000, + -999999999, + 0, + 0, + ], # (86400000 ms/day)*(1 billion - 1 days) + ], +) +def test_non_nano_c_api( + unit, + value, + expected_pytime_days, + expected_pytime_seconds, + expected_pytime_microseconds, +): + tmp_delta = Timedelta(days=1) # just for exposing the function under test + dtype = np.dtype(f"m8[{unit}]") + reso = _python_get_unit_from_dtype(dtype) + + uut_delta = Timedelta(tmp_delta._from_value_and_reso(value, reso)) + + assert uut_delta._get_pytimedelta_days() == expected_pytime_days + assert uut_delta._get_pytimedelta_seconds() == expected_pytime_seconds + assert uut_delta._get_pytimedelta_microseconds() == expected_pytime_microseconds + + +@pytest.mark.parametrize( + "unit,value", + [ + # upper limit for pytimedelta is 1 billion days + ["s", 86400000000000], # (86400 s/day)*(1 billion days) + ["ms", 86400000000000000], # (86400000 ms/day)*(1 billion days) + # lower limit for pytimedelta is (1 billion - 1) days + ["s", -86399999913601], # (86400 s/day)*(1 billion - 1 days) - 1 s + ["ms", -86399999913600001], # (86400000 ms/day)*(1 billion - 1 days) - 1 ms + ], +) +def test_non_nano_c_api_pytimedelta_overflow(unit, value): + tmp_delta = Timedelta(days=1) # just for exposing the function under test + dtype = np.dtype(f"m8[{unit}]") + reso = _python_get_unit_from_dtype(dtype) + + uut_delta = Timedelta(tmp_delta._from_value_and_reso(value, reso)) + + assert uut_delta._get_pytimedelta_days() == 0 + assert uut_delta._get_pytimedelta_seconds() == 0 + assert uut_delta._get_pytimedelta_microseconds() == 0
- [x] closes #54682
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
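To see the user-visible effect (this mirrors the `pa.scalar` assertion in the new tests): before this fix, a non-nanosecond `Timedelta` exposed zeroed components through the pytimedelta C-API for second and millisecond resolutions, so downstream readers such as pyarrow saw the wrong value.

```python
import pandas as pd
import pyarrow as pa

orig = pd.Timedelta(seconds=1)
converted = orig.as_unit("s")

# pyarrow reads pandas Timedeltas via the PyDateTime_DELTA_GET_*
# C-API macros; with second resolution these used to report 0,
# making the two scalars compare unequal.
assert pa.scalar(converted) == pa.scalar(orig)
```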
https://api.github.com/repos/pandas-dev/pandas/pulls/55213
2023-09-20T04:18:32Z
2023-11-03T20:30:18Z
null
2023-11-03T20:30:18Z
DOC: Add release date for 2.1.1
diff --git a/doc/source/whatsnew/v2.1.1.rst b/doc/source/whatsnew/v2.1.1.rst
index 0dc113bd9ab7f..42a7029b1fa77 100644
--- a/doc/source/whatsnew/v2.1.1.rst
+++ b/doc/source/whatsnew/v2.1.1.rst
@@ -1,6 +1,6 @@
 .. _whatsnew_211:
 
-What's new in 2.1.1 (September XX, 2023)
+What's new in 2.1.1 (September 20, 2023)
 ----------------------------------------
 
 These are the changes in pandas 2.1.1. See :ref:`release` for a full changelog
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55211
2023-09-20T02:59:09Z
2023-09-20T11:57:17Z
2023-09-20T11:57:17Z
2023-09-20T11:57:18Z