title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
DOC: Ignore functions on validate docstrings - error EX01 | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index b22f19faff9da..dc8784de67f13 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -83,6 +83,500 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
$BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT02,RT04,RT05,SA02,SA03,SA04,SS01,SS02,SS03,SS04,SS05,SS06
RET=$(($RET + $?)) ; echo $MSG "DONE"
+ MSG='Partially validate docstrings (EX01)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01 --ignore_functions \
+ pandas.Series.index \
+ pandas.Series.dtype \
+ pandas.Series.shape \
+ pandas.Series.nbytes \
+ pandas.Series.ndim \
+ pandas.Series.size \
+ pandas.Series.T \
+ pandas.Series.hasnans \
+ pandas.Series.dtypes \
+ pandas.Series.to_period \
+ pandas.Series.to_timestamp \
+ pandas.Series.to_list \
+ pandas.Series.__iter__ \
+ pandas.Series.keys \
+ pandas.Series.item \
+ pandas.Series.pipe \
+ pandas.Series.kurt \
+ pandas.Series.mean \
+ pandas.Series.median \
+ pandas.Series.mode \
+ pandas.Series.sem \
+ pandas.Series.skew \
+ pandas.Series.kurtosis \
+ pandas.Series.is_unique \
+ pandas.Series.is_monotonic_increasing \
+ pandas.Series.is_monotonic_decreasing \
+ pandas.Series.backfill \
+ pandas.Series.pad \
+ pandas.Series.argsort \
+ pandas.Series.reorder_levels \
+ pandas.Series.ravel \
+ pandas.Series.first_valid_index \
+ pandas.Series.last_valid_index \
+ pandas.Series.dt.date \
+ pandas.Series.dt.time \
+ pandas.Series.dt.timetz \
+ pandas.Series.dt.dayofyear \
+ pandas.Series.dt.day_of_year \
+ pandas.Series.dt.quarter \
+ pandas.Series.dt.daysinmonth \
+ pandas.Series.dt.days_in_month \
+ pandas.Series.dt.tz \
+ pandas.Series.dt.end_time \
+ pandas.Series.dt.days \
+ pandas.Series.dt.seconds \
+ pandas.Series.dt.microseconds \
+ pandas.Series.dt.nanoseconds \
+ pandas.Series.str.center \
+ pandas.Series.str.decode \
+ pandas.Series.str.encode \
+ pandas.Series.str.find \
+ pandas.Series.str.fullmatch \
+ pandas.Series.str.index \
+ pandas.Series.str.ljust \
+ pandas.Series.str.match \
+ pandas.Series.str.normalize \
+ pandas.Series.str.rfind \
+ pandas.Series.str.rindex \
+ pandas.Series.str.rjust \
+ pandas.Series.str.translate \
+ pandas.Series.sparse \
+ pandas.DataFrame.sparse \
+ pandas.Series.cat.categories \
+ pandas.Series.cat.ordered \
+ pandas.Series.cat.codes \
+ pandas.Series.cat.reorder_categories \
+ pandas.Series.cat.set_categories \
+ pandas.Series.cat.as_ordered \
+ pandas.Series.cat.as_unordered \
+ pandas.Series.sparse.fill_value \
+ pandas.Flags \
+ pandas.Series.attrs \
+ pandas.Series.plot \
+ pandas.Series.hist \
+ pandas.Series.to_string \
+ pandas.errors.AbstractMethodError \
+ pandas.errors.AccessorRegistrationWarning \
+ pandas.errors.AttributeConflictWarning \
+ pandas.errors.DataError \
+ pandas.errors.EmptyDataError \
+ pandas.errors.IncompatibilityWarning \
+ pandas.errors.InvalidComparison \
+ pandas.errors.InvalidIndexError \
+ pandas.errors.InvalidVersion \
+ pandas.errors.IntCastingNaNError \
+ pandas.errors.LossySetitemError \
+ pandas.errors.MergeError \
+ pandas.errors.NoBufferPresent \
+ pandas.errors.NullFrequencyError \
+ pandas.errors.NumbaUtilError \
+ pandas.errors.OptionError \
+ pandas.errors.OutOfBoundsDatetime \
+ pandas.errors.OutOfBoundsTimedelta \
+ pandas.errors.ParserError \
+ pandas.errors.PerformanceWarning \
+ pandas.errors.PyperclipException \
+ pandas.errors.PyperclipWindowsException \
+ pandas.errors.UnsortedIndexError \
+ pandas.errors.UnsupportedFunctionCall \
+ pandas.show_versions \
+ pandas.test \
+ pandas.NaT \
+ pandas.Timestamp.unit \
+ pandas.Timestamp.as_unit \
+ pandas.Timestamp.ctime \
+ pandas.Timestamp.date \
+ pandas.Timestamp.dst \
+ pandas.Timestamp.isocalendar \
+ pandas.Timestamp.isoweekday \
+ pandas.Timestamp.strptime \
+ pandas.Timestamp.time \
+ pandas.Timestamp.timetuple \
+ pandas.Timestamp.timetz \
+ pandas.Timestamp.to_datetime64 \
+ pandas.Timestamp.toordinal \
+ pandas.Timestamp.tzname \
+ pandas.Timestamp.utcoffset \
+ pandas.Timestamp.utctimetuple \
+ pandas.Timestamp.weekday \
+ pandas.arrays.DatetimeArray \
+ pandas.Timedelta.components \
+ pandas.Timedelta.view \
+ pandas.Timedelta.as_unit \
+ pandas.Timedelta.ceil \
+ pandas.Timedelta.floor \
+ pandas.Timedelta.round \
+ pandas.Timedelta.to_pytimedelta \
+ pandas.Timedelta.to_timedelta64 \
+ pandas.Timedelta.to_numpy \
+ pandas.Timedelta.total_seconds \
+ pandas.arrays.TimedeltaArray \
+ pandas.Period.end_time \
+ pandas.Period.freqstr \
+ pandas.Period.is_leap_year \
+ pandas.Period.month \
+ pandas.Period.quarter \
+ pandas.Period.year \
+ pandas.Period.asfreq \
+ pandas.Period.now \
+ pandas.Period.to_timestamp \
+ pandas.arrays.PeriodArray \
+ pandas.Interval.closed \
+ pandas.Interval.left \
+ pandas.Interval.length \
+ pandas.Interval.right \
+ pandas.arrays.IntervalArray.left \
+ pandas.arrays.IntervalArray.right \
+ pandas.arrays.IntervalArray.closed \
+ pandas.arrays.IntervalArray.mid \
+ pandas.arrays.IntervalArray.length \
+ pandas.arrays.IntervalArray.is_non_overlapping_monotonic \
+ pandas.arrays.IntervalArray.from_arrays \
+ pandas.arrays.IntervalArray.to_tuples \
+ pandas.Int8Dtype \
+ pandas.Int16Dtype \
+ pandas.Int32Dtype \
+ pandas.Int64Dtype \
+ pandas.UInt8Dtype \
+ pandas.UInt16Dtype \
+ pandas.UInt32Dtype \
+ pandas.UInt64Dtype \
+ pandas.NA \
+ pandas.Float32Dtype \
+ pandas.Float64Dtype \
+ pandas.CategoricalDtype.categories \
+ pandas.CategoricalDtype.ordered \
+ pandas.Categorical.dtype \
+ pandas.Categorical.categories \
+ pandas.Categorical.ordered \
+ pandas.Categorical.codes \
+ pandas.Categorical.__array__ \
+ pandas.SparseDtype \
+ pandas.DatetimeTZDtype.unit \
+ pandas.DatetimeTZDtype.tz \
+ pandas.PeriodDtype.freq \
+ pandas.IntervalDtype.subtype \
+ pandas_dtype \
+ pandas.api.types.is_bool \
+ pandas.api.types.is_complex \
+ pandas.api.types.is_float \
+ pandas.api.types.is_integer \
+ pandas.api.types.pandas_dtype \
+ pandas.read_clipboard \
+ pandas.ExcelFile.parse \
+ pandas.DataFrame.to_html \
+ pandas.io.formats.style.Styler.to_html \
+ pandas.HDFStore.put \
+ pandas.HDFStore.append \
+ pandas.HDFStore.get \
+ pandas.HDFStore.select \
+ pandas.HDFStore.info \
+ pandas.HDFStore.keys \
+ pandas.HDFStore.groups \
+ pandas.HDFStore.walk \
+ pandas.read_feather \
+ pandas.DataFrame.to_feather \
+ pandas.read_parquet \
+ pandas.read_orc \
+ pandas.read_sas \
+ pandas.read_spss \
+ pandas.read_sql_query \
+ pandas.read_gbq \
+ pandas.io.stata.StataReader.data_label \
+ pandas.io.stata.StataReader.value_labels \
+ pandas.io.stata.StataReader.variable_labels \
+ pandas.io.stata.StataWriter.write_file \
+ pandas.core.resample.Resampler.__iter__ \
+ pandas.core.resample.Resampler.groups \
+ pandas.core.resample.Resampler.indices \
+ pandas.core.resample.Resampler.get_group \
+ pandas.core.resample.Resampler.ffill \
+ pandas.core.resample.Resampler.asfreq \
+ pandas.core.resample.Resampler.count \
+ pandas.core.resample.Resampler.nunique \
+ pandas.core.resample.Resampler.max \
+ pandas.core.resample.Resampler.mean \
+ pandas.core.resample.Resampler.median \
+ pandas.core.resample.Resampler.min \
+ pandas.core.resample.Resampler.ohlc \
+ pandas.core.resample.Resampler.prod \
+ pandas.core.resample.Resampler.size \
+ pandas.core.resample.Resampler.sem \
+ pandas.core.resample.Resampler.std \
+ pandas.core.resample.Resampler.sum \
+ pandas.core.resample.Resampler.var \
+ pandas.core.resample.Resampler.quantile \
+ pandas.describe_option \
+ pandas.reset_option \
+ pandas.get_option \
+ pandas.set_option \
+ pandas.plotting.deregister_matplotlib_converters \
+ pandas.plotting.plot_params \
+ pandas.plotting.register_matplotlib_converters \
+ pandas.plotting.table \
+ pandas.util.hash_array \
+ pandas.util.hash_pandas_object \
+ pandas_object \
+ pandas.api.interchange.from_dataframe \
+ pandas.Index.values \
+ pandas.Index.hasnans \
+ pandas.Index.dtype \
+ pandas.Index.inferred_type \
+ pandas.Index.shape \
+ pandas.Index.name \
+ pandas.Index.nbytes \
+ pandas.Index.ndim \
+ pandas.Index.size \
+ pandas.Index.T \
+ pandas.Index.memory_usage \
+ pandas.Index.copy \
+ pandas.Index.drop \
+ pandas.Index.identical \
+ pandas.Index.insert \
+ pandas.Index.is_ \
+ pandas.Index.take \
+ pandas.Index.putmask \
+ pandas.Index.unique \
+ pandas.Index.fillna \
+ pandas.Index.dropna \
+ pandas.Index.astype \
+ pandas.Index.item \
+ pandas.Index.map \
+ pandas.Index.ravel \
+ pandas.Index.to_list \
+ pandas.Index.append \
+ pandas.Index.join \
+ pandas.Index.asof_locs \
+ pandas.Index.get_slice_bound \
+ pandas.RangeIndex \
+ pandas.RangeIndex.start \
+ pandas.RangeIndex.stop \
+ pandas.RangeIndex.step \
+ pandas.RangeIndex.from_range \
+ pandas.CategoricalIndex.codes \
+ pandas.CategoricalIndex.categories \
+ pandas.CategoricalIndex.ordered \
+ pandas.CategoricalIndex.reorder_categories \
+ pandas.CategoricalIndex.set_categories \
+ pandas.CategoricalIndex.as_ordered \
+ pandas.CategoricalIndex.as_unordered \
+ pandas.CategoricalIndex.equals \
+ pandas.IntervalIndex.closed \
+ pandas.IntervalIndex.values \
+ pandas.IntervalIndex.is_non_overlapping_monotonic \
+ pandas.IntervalIndex.to_tuples \
+ pandas.MultiIndex.dtypes \
+ pandas.MultiIndex.drop \
+ pandas.DatetimeIndex \
+ pandas.DatetimeIndex.date \
+ pandas.DatetimeIndex.time \
+ pandas.DatetimeIndex.timetz \
+ pandas.DatetimeIndex.dayofyear \
+ pandas.DatetimeIndex.day_of_year \
+ pandas.DatetimeIndex.quarter \
+ pandas.DatetimeIndex.tz \
+ pandas.DatetimeIndex.freqstr \
+ pandas.DatetimeIndex.inferred_freq \
+ pandas.DatetimeIndex.indexer_at_time \
+ pandas.DatetimeIndex.indexer_between_time \
+ pandas.DatetimeIndex.snap \
+ pandas.DatetimeIndex.as_unit \
+ pandas.DatetimeIndex.to_pydatetime \
+ pandas.DatetimeIndex.to_series \
+ pandas.DatetimeIndex.mean \
+ pandas.DatetimeIndex.std \
+ pandas.TimedeltaIndex \
+ pandas.TimedeltaIndex.days \
+ pandas.TimedeltaIndex.seconds \
+ pandas.TimedeltaIndex.microseconds \
+ pandas.TimedeltaIndex.nanoseconds \
+ pandas.TimedeltaIndex.components \
+ pandas.TimedeltaIndex.inferred_freq \
+ pandas.TimedeltaIndex.as_unit \
+ pandas.TimedeltaIndex.to_pytimedelta \
+ pandas.TimedeltaIndex.mean \
+ pandas.PeriodIndex.day \
+ pandas.PeriodIndex.dayofweek \
+ pandas.PeriodIndex.day_of_week \
+ pandas.PeriodIndex.dayofyear \
+ pandas.PeriodIndex.day_of_year \
+ pandas.PeriodIndex.days_in_month \
+ pandas.PeriodIndex.daysinmonth \
+ pandas.PeriodIndex.end_time \
+ pandas.PeriodIndex.freqstr \
+ pandas.PeriodIndex.hour \
+ pandas.PeriodIndex.is_leap_year \
+ pandas.PeriodIndex.minute \
+ pandas.PeriodIndex.month \
+ pandas.PeriodIndex.quarter \
+ pandas.PeriodIndex.second \
+ pandas.PeriodIndex.week \
+ pandas.PeriodIndex.weekday \
+ pandas.PeriodIndex.weekofyear \
+ pandas.PeriodIndex.year \
+ pandas.PeriodIndex.to_timestamp \
+ pandas.core.window.rolling.Rolling.max \
+ pandas.core.window.rolling.Rolling.cov \
+ pandas.core.window.rolling.Rolling.skew \
+ pandas.core.window.rolling.Rolling.apply \
+ pandas.core.window.rolling.Window.mean \
+ pandas.core.window.rolling.Window.sum \
+ pandas.core.window.rolling.Window.var \
+ pandas.core.window.rolling.Window.std \
+ pandas.core.window.expanding.Expanding.count \
+ pandas.core.window.expanding.Expanding.sum \
+ pandas.core.window.expanding.Expanding.mean \
+ pandas.core.window.expanding.Expanding.median \
+ pandas.core.window.expanding.Expanding.min \
+ pandas.core.window.expanding.Expanding.max \
+ pandas.core.window.expanding.Expanding.corr \
+ pandas.core.window.expanding.Expanding.cov \
+ pandas.core.window.expanding.Expanding.skew \
+ pandas.core.window.expanding.Expanding.apply \
+ pandas.core.window.expanding.Expanding.quantile \
+ pandas.core.window.ewm.ExponentialMovingWindow.mean \
+ pandas.core.window.ewm.ExponentialMovingWindow.sum \
+ pandas.core.window.ewm.ExponentialMovingWindow.std \
+ pandas.core.window.ewm.ExponentialMovingWindow.var \
+ pandas.core.window.ewm.ExponentialMovingWindow.corr \
+ pandas.core.window.ewm.ExponentialMovingWindow.cov \
+ pandas.api.indexers.BaseIndexer \
+ pandas.api.indexers.VariableOffsetWindowIndexer \
+ pandas.core.groupby.DataFrameGroupBy.__iter__ \
+ pandas.core.groupby.SeriesGroupBy.__iter__ \
+ pandas.core.groupby.DataFrameGroupBy.groups \
+ pandas.core.groupby.SeriesGroupBy.groups \
+ pandas.core.groupby.DataFrameGroupBy.indices \
+ pandas.core.groupby.SeriesGroupBy.indices \
+ pandas.core.groupby.DataFrameGroupBy.get_group \
+ pandas.core.groupby.SeriesGroupBy.get_group \
+ pandas.core.groupby.DataFrameGroupBy.all \
+ pandas.core.groupby.DataFrameGroupBy.any \
+ pandas.core.groupby.DataFrameGroupBy.bfill \
+ pandas.core.groupby.DataFrameGroupBy.count \
+ pandas.core.groupby.DataFrameGroupBy.cummax \
+ pandas.core.groupby.DataFrameGroupBy.cummin \
+ pandas.core.groupby.DataFrameGroupBy.cumprod \
+ pandas.core.groupby.DataFrameGroupBy.cumsum \
+ pandas.core.groupby.DataFrameGroupBy.diff \
+ pandas.core.groupby.DataFrameGroupBy.ffill \
+ pandas.core.groupby.DataFrameGroupBy.max \
+ pandas.core.groupby.DataFrameGroupBy.median \
+ pandas.core.groupby.DataFrameGroupBy.min \
+ pandas.core.groupby.DataFrameGroupBy.ohlc \
+ pandas.core.groupby.DataFrameGroupBy.pct_change \
+ pandas.core.groupby.DataFrameGroupBy.prod \
+ pandas.core.groupby.DataFrameGroupBy.sem \
+ pandas.core.groupby.DataFrameGroupBy.shift \
+ pandas.core.groupby.DataFrameGroupBy.size \
+ pandas.core.groupby.DataFrameGroupBy.skew \
+ pandas.core.groupby.DataFrameGroupBy.std \
+ pandas.core.groupby.DataFrameGroupBy.sum \
+ pandas.core.groupby.DataFrameGroupBy.var \
+ pandas.core.groupby.SeriesGroupBy.all \
+ pandas.core.groupby.SeriesGroupBy.any \
+ pandas.core.groupby.SeriesGroupBy.bfill \
+ pandas.core.groupby.SeriesGroupBy.count \
+ pandas.core.groupby.SeriesGroupBy.cummax \
+ pandas.core.groupby.SeriesGroupBy.cummin \
+ pandas.core.groupby.SeriesGroupBy.cumprod \
+ pandas.core.groupby.SeriesGroupBy.cumsum \
+ pandas.core.groupby.SeriesGroupBy.diff \
+ pandas.core.groupby.SeriesGroupBy.ffill \
+ pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing \
+ pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing \
+ pandas.core.groupby.SeriesGroupBy.max \
+ pandas.core.groupby.SeriesGroupBy.median \
+ pandas.core.groupby.SeriesGroupBy.min \
+ pandas.core.groupby.SeriesGroupBy.nunique \
+ pandas.core.groupby.SeriesGroupBy.ohlc \
+ pandas.core.groupby.SeriesGroupBy.pct_change \
+ pandas.core.groupby.SeriesGroupBy.prod \
+ pandas.core.groupby.SeriesGroupBy.sem \
+ pandas.core.groupby.SeriesGroupBy.shift \
+ pandas.core.groupby.SeriesGroupBy.size \
+ pandas.core.groupby.SeriesGroupBy.skew \
+ pandas.core.groupby.SeriesGroupBy.std \
+ pandas.core.groupby.SeriesGroupBy.sum \
+ pandas.core.groupby.SeriesGroupBy.var \
+ pandas.core.groupby.SeriesGroupBy.hist \
+ pandas.core.groupby.DataFrameGroupBy.plot \
+ pandas.core.groupby.SeriesGroupBy.plot \
+ pandas.io.formats.style.Styler \
+ pandas.io.formats.style.Styler.from_custom_template \
+ pandas.io.formats.style.Styler.set_caption \
+ pandas.io.formats.style.Styler.set_sticky \
+ pandas.io.formats.style.Styler.set_uuid \
+ pandas.io.formats.style.Styler.clear \
+ pandas.io.formats.style.Styler.highlight_null \
+ pandas.io.formats.style.Styler.highlight_max \
+ pandas.io.formats.style.Styler.highlight_min \
+ pandas.io.formats.style.Styler.bar \
+ pandas.io.formats.style.Styler.to_string \
+ pandas.api.extensions.ExtensionDtype \
+ pandas.api.extensions.ExtensionArray \
+ pandas.arrays.PandasArray \
+ pandas.api.extensions.ExtensionArray._accumulate \
+ pandas.api.extensions.ExtensionArray._concat_same_type \
+ pandas.api.extensions.ExtensionArray._formatter \
+ pandas.api.extensions.ExtensionArray._from_factorized \
+ pandas.api.extensions.ExtensionArray._from_sequence \
+ pandas.api.extensions.ExtensionArray._from_sequence_of_strings \
+ pandas.api.extensions.ExtensionArray._reduce \
+ pandas.api.extensions.ExtensionArray._values_for_argsort \
+ pandas.api.extensions.ExtensionArray._values_for_factorize \
+ pandas.api.extensions.ExtensionArray.argsort \
+ pandas.api.extensions.ExtensionArray.astype \
+ pandas.api.extensions.ExtensionArray.copy \
+ pandas.api.extensions.ExtensionArray.view \
+ pandas.api.extensions.ExtensionArray.dropna \
+ pandas.api.extensions.ExtensionArray.equals \
+ pandas.api.extensions.ExtensionArray.factorize \
+ pandas.api.extensions.ExtensionArray.fillna \
+ pandas.api.extensions.ExtensionArray.insert \
+ pandas.api.extensions.ExtensionArray.isin \
+ pandas.api.extensions.ExtensionArray.isna \
+ pandas.api.extensions.ExtensionArray.ravel \
+ pandas.api.extensions.ExtensionArray.searchsorted \
+ pandas.api.extensions.ExtensionArray.shift \
+ pandas.api.extensions.ExtensionArray.unique \
+ pandas.api.extensions.ExtensionArray.dtype \
+ pandas.api.extensions.ExtensionArray.nbytes \
+ pandas.api.extensions.ExtensionArray.ndim \
+ pandas.api.extensions.ExtensionArray.shape \
+ pandas.api.extensions.ExtensionArray.tolist \
+ pandas.DataFrame.index \
+ pandas.DataFrame.columns \
+ pandas.DataFrame.__iter__ \
+ pandas.DataFrame.keys \
+ pandas.DataFrame.iterrows \
+ pandas.DataFrame.pipe \
+ pandas.DataFrame.kurt \
+ pandas.DataFrame.kurtosis \
+ pandas.DataFrame.mean \
+ pandas.DataFrame.median \
+ pandas.DataFrame.sem \
+ pandas.DataFrame.skew \
+ pandas.DataFrame.backfill \
+ pandas.DataFrame.pad \
+ pandas.DataFrame.swapaxes \
+ pandas.DataFrame.first_valid_index \
+ pandas.DataFrame.last_valid_index \
+ pandas.DataFrame.to_timestamp \
+ pandas.DataFrame.attrs \
+ pandas.DataFrame.plot \
+ pandas.DataFrame.sparse.density \
+ pandas.DataFrame.sparse.to_coo \
+ pandas.DataFrame.to_gbq \
+ pandas.DataFrame.style \
+ pandas.DataFrame.__dataframe__ \
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+
fi
### DOCUMENTATION NOTEBOOKS ###
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Towards #37875
See https://github.com/pandas-dev/pandas/pull/50896#issuecomment-1398768593 | https://api.github.com/repos/pandas-dev/pandas/pulls/50956 | 2023-01-24T11:26:04Z | 2023-01-24T18:45:51Z | 2023-01-24T18:45:51Z | 2023-01-25T15:18:39Z |
Updated value_counts documentation and implementation and added single label subset test | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index ac7d30310be9e..f63563472f1f2 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1088,6 +1088,7 @@ Removal of prior version deprecations/changes
- Arguments after ``expr`` in :meth:`DataFrame.eval` and :meth:`DataFrame.query` are keyword-only (:issue:`47587`)
- Removed :meth:`Index._get_attributes_dict` (:issue:`50648`)
- Removed :meth:`Series.__array_wrap__` (:issue:`50648`)
+- Changed behavior of :meth:`.DataFrame.value_counts` to return a :class:`Series` with :class:`MultiIndex` for any list-like(one element or not) but an :class:`Index` for a single label (:issue:`50829`)
.. ---------------------------------------------------------------------------
.. _whatsnew_200.performance:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4da8cb975b975..2520f54ae3d99 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6957,7 +6957,7 @@ def value_counts(
Parameters
----------
- subset : list-like, optional
+ subset : label or list of labels, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
@@ -6981,9 +6981,10 @@ def value_counts(
Notes
-----
The returned Series will have a MultiIndex with one level per input
- column. By default, rows that contain any NA values are omitted from
- the result. By default, the resulting Series will be in descending
- order so that the first element is the most frequently-occurring row.
+ column but an Index (non-multi) for a single label. By default, rows
+ that contain any NA values are omitted from the result. By default,
+ the resulting Series will be in descending order so that the first
+ element is the most frequently-occurring row.
Examples
--------
@@ -7049,6 +7050,13 @@ def value_counts(
John Smith 1
NaN 1
Name: count, dtype: int64
+
+ >>> df.value_counts("first_name")
+ first_name
+ John 2
+ Anne 1
+ Beth 1
+ Name: count, dtype: int64
"""
if subset is None:
subset = self.columns.tolist()
@@ -7063,7 +7071,7 @@ def value_counts(
counts /= counts.sum()
# Force MultiIndex for single column
- if len(subset) == 1:
+ if is_list_like(subset) and len(subset) == 1:
counts.index = MultiIndex.from_arrays(
[counts.index], names=[counts.index.name]
)
diff --git a/pandas/tests/frame/methods/test_value_counts.py b/pandas/tests/frame/methods/test_value_counts.py
index e8c129fd12bfd..355f05cd5156c 100644
--- a/pandas/tests/frame/methods/test_value_counts.py
+++ b/pandas/tests/frame/methods/test_value_counts.py
@@ -1,4 +1,5 @@
import numpy as np
+import pytest
import pandas as pd
import pandas._testing as tm
@@ -155,3 +156,22 @@ def test_data_frame_value_counts_dropna_false(nulls_fixture):
)
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("columns", (["first_name", "middle_name"], [0, 1]))
+def test_data_frame_value_counts_subset(nulls_fixture, columns):
+ # GH 50829
+ df = pd.DataFrame(
+ {
+ columns[0]: ["John", "Anne", "John", "Beth"],
+ columns[1]: ["Smith", nulls_fixture, nulls_fixture, "Louise"],
+ },
+ )
+ result = df.value_counts(columns[0])
+ expected = pd.Series(
+ data=[2, 1, 1],
+ index=pd.Index(["John", "Anne", "Beth"], name=columns[0]),
+ name="count",
+ )
+
+ tm.assert_series_equal(result, expected)
| - [ ] closes #50829
- [x] Tests added and passed.
- [x] All code checks passed.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50955 | 2023-01-24T05:45:21Z | 2023-02-20T03:27:38Z | 2023-02-20T03:27:38Z | 2023-02-20T03:27:47Z |
ENH: Allow dt accessor when using ArrowDtype with datetime types | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7d028935ad175..421a19fc15f28 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -263,6 +263,7 @@ Alternatively, copy on write can be enabled locally through:
Other enhancements
^^^^^^^^^^^^^^^^^^
+- Added support for ``dt`` accessor methods when using :class:`ArrowDtype` with a ``pyarrow.timestamp`` type (:issue:`50954`)
- :func:`read_sas` now supports using ``encoding='infer'`` to correctly read and use the encoding specified by the sas file. (:issue:`48048`)
- :meth:`.DataFrameGroupBy.quantile`, :meth:`.SeriesGroupBy.quantile` and :meth:`.DataFrameGroupBy.std` now preserve nullable dtypes instead of casting to numpy dtypes (:issue:`37493`)
- :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support an ``axis`` argument. If ``axis`` is set, the default behaviour of which axis to consider can be overwritten (:issue:`47819`)
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index 7390b04da4787..64bb9407ea83b 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -6,7 +6,10 @@
"""
from __future__ import annotations
-from typing import final
+from typing import (
+ Callable,
+ final,
+)
import warnings
from pandas.util._decorators import doc
@@ -59,7 +62,13 @@ def _delegate_method(self, name, *args, **kwargs):
@classmethod
def _add_delegate_accessors(
- cls, delegate, accessors, typ: str, overwrite: bool = False
+ cls,
+ delegate,
+ accessors: list[str],
+ typ: str,
+ overwrite: bool = False,
+ accessor_mapping: Callable[[str], str] = lambda x: x,
+ raise_on_missing: bool = True,
) -> None:
"""
Add accessors to cls from the delegate class.
@@ -75,6 +84,11 @@ def _add_delegate_accessors(
typ : {'property', 'method'}
overwrite : bool, default False
Overwrite the method/property in the target class if it exists.
+ accessor_mapping: Callable, default lambda x: x
+ Callable to map the delegate's function to the cls' function.
+ raise_on_missing: bool, default True
+ Raise if an accessor does not exist on delegate.
+ False skips the missing accessor.
"""
def _create_delegator_property(name):
@@ -88,7 +102,9 @@ def _setter(self, new_values):
_setter.__name__ = name
return property(
- fget=_getter, fset=_setter, doc=getattr(delegate, name).__doc__
+ fget=_getter,
+ fset=_setter,
+ doc=getattr(delegate, accessor_mapping(name)).__doc__,
)
def _create_delegator_method(name):
@@ -96,12 +112,18 @@ def f(self, *args, **kwargs):
return self._delegate_method(name, *args, **kwargs)
f.__name__ = name
- f.__doc__ = getattr(delegate, name).__doc__
+ f.__doc__ = getattr(delegate, accessor_mapping(name)).__doc__
return f
for name in accessors:
+ if (
+ not raise_on_missing
+ and getattr(delegate, accessor_mapping(name), None) is None
+ ):
+ continue
+
if typ == "property":
f = _create_delegator_property(name)
else:
@@ -112,7 +134,14 @@ def f(self, *args, **kwargs):
setattr(cls, name, f)
-def delegate_names(delegate, accessors, typ: str, overwrite: bool = False):
+def delegate_names(
+ delegate,
+ accessors: list[str],
+ typ: str,
+ overwrite: bool = False,
+ accessor_mapping: Callable[[str], str] = lambda x: x,
+ raise_on_missing: bool = True,
+):
"""
Add delegated names to a class using a class decorator. This provides
an alternative usage to directly calling `_add_delegate_accessors`
@@ -127,6 +156,11 @@ def delegate_names(delegate, accessors, typ: str, overwrite: bool = False):
typ : {'property', 'method'}
overwrite : bool, default False
Overwrite the method/property in the target class if it exists.
+ accessor_mapping: Callable, default lambda x: x
+ Callable to map the delegate's function to the cls' function.
+ raise_on_missing: bool, default True
+ Raise if an accessor does not exist on delegate.
+ False skips the missing accessor.
Returns
-------
@@ -141,7 +175,14 @@ class CategoricalAccessor(PandasDelegate):
"""
def add_delegate_accessors(cls):
- cls._add_delegate_accessors(delegate, accessors, typ, overwrite=overwrite)
+ cls._add_delegate_accessors(
+ delegate,
+ accessors,
+ typ,
+ overwrite=overwrite,
+ accessor_mapping=accessor_mapping,
+ raise_on_missing=raise_on_missing,
+ )
return cls
return add_delegate_accessors
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index a4cde823c6713..ad10d82c0ca3c 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -23,6 +23,8 @@
Scalar,
SortKind,
TakeIndexer,
+ TimeAmbiguous,
+ TimeNonexistent,
npt,
)
from pandas.compat import (
@@ -53,6 +55,8 @@
validate_indices,
)
+from pandas.tseries.frequencies import to_offset
+
if not pa_version_under7p0:
import pyarrow as pa
import pyarrow.compute as pc
@@ -1413,3 +1417,154 @@ def _replace_with_mask(
result = np.array(values, dtype=object)
result[mask] = replacements
return pa.array(result, type=values.type, from_pandas=True)
+
+ @property
+ def _dt_day(self):
+ return type(self)(pc.day(self._data))
+
+ @property
+ def _dt_day_of_week(self):
+ return type(self)(pc.day_of_week(self._data))
+
+ _dt_dayofweek = _dt_day_of_week
+ _dt_weekday = _dt_day_of_week
+
+ @property
+ def _dt_day_of_year(self):
+ return type(self)(pc.day_of_year(self._data))
+
+ _dt_dayofyear = _dt_day_of_year
+
+ @property
+ def _dt_hour(self):
+ return type(self)(pc.hour(self._data))
+
+ def _dt_isocalendar(self):
+ return type(self)(pc.iso_calendar(self._data))
+
+ @property
+ def _dt_is_leap_year(self):
+ return type(self)(pc.is_leap_year(self._data))
+
+ @property
+ def _dt_microsecond(self):
+ return type(self)(pc.microsecond(self._data))
+
+ @property
+ def _dt_minute(self):
+ return type(self)(pc.minute(self._data))
+
+ @property
+ def _dt_month(self):
+ return type(self)(pc.month(self._data))
+
+ @property
+ def _dt_nanosecond(self):
+ return type(self)(pc.nanosecond(self._data))
+
+ @property
+ def _dt_quarter(self):
+ return type(self)(pc.quarter(self._data))
+
+ @property
+ def _dt_second(self):
+ return type(self)(pc.second(self._data))
+
+ @property
+ def _dt_date(self):
+ return type(self)(self._data.cast(pa.date64()))
+
+ @property
+ def _dt_time(self):
+ unit = (
+ self.dtype.pyarrow_dtype.unit
+ if self.dtype.pyarrow_dtype.unit in {"us", "ns"}
+ else "ns"
+ )
+ return type(self)(self._data.cast(pa.time64(unit)))
+
+ @property
+ def _dt_tz(self):
+ return self.dtype.pyarrow_dtype.tz
+
+ def _dt_strftime(self, format: str):
+ return type(self)(pc.strftime(self._data, format=format))
+
+ def _round_temporally(
+ self,
+ method: Literal["ceil", "floor", "round"],
+ freq,
+ ambiguous: TimeAmbiguous = "raise",
+ nonexistent: TimeNonexistent = "raise",
+ ):
+ if ambiguous != "raise":
+ raise NotImplementedError("ambiguous is not supported.")
+ if nonexistent != "raise":
+ raise NotImplementedError("nonexistent is not supported.")
+ offset = to_offset(freq)
+ if offset is None:
+ raise ValueError(f"Must specify a valid frequency: {freq}")
+ pa_supported_unit = {
+ "A": "year",
+ "AS": "year",
+ "Q": "quarter",
+ "QS": "quarter",
+ "M": "month",
+ "MS": "month",
+ "W": "week",
+ "D": "day",
+ "H": "hour",
+ "T": "minute",
+ "S": "second",
+ "L": "millisecond",
+ "U": "microsecond",
+ "N": "nanosecond",
+ }
+ unit = pa_supported_unit.get(offset._prefix, None)
+ if unit is None:
+ raise ValueError(f"{freq=} is not supported")
+ multiple = offset.n
+ rounding_method = getattr(pc, f"{method}_temporal")
+ return type(self)(rounding_method(self._data, multiple=multiple, unit=unit))
+
+ def _dt_ceil(
+ self,
+ freq,
+ ambiguous: TimeAmbiguous = "raise",
+ nonexistent: TimeNonexistent = "raise",
+ ):
+ return self._round_temporally("ceil", freq, ambiguous, nonexistent)
+
+ def _dt_floor(
+ self,
+ freq,
+ ambiguous: TimeAmbiguous = "raise",
+ nonexistent: TimeNonexistent = "raise",
+ ):
+ return self._round_temporally("floor", freq, ambiguous, nonexistent)
+
+ def _dt_round(
+ self,
+ freq,
+ ambiguous: TimeAmbiguous = "raise",
+ nonexistent: TimeNonexistent = "raise",
+ ):
+ return self._round_temporally("round", freq, ambiguous, nonexistent)
+
+ def _dt_tz_localize(
+ self,
+ tz,
+ ambiguous: TimeAmbiguous = "raise",
+ nonexistent: TimeNonexistent = "raise",
+ ):
+ if ambiguous != "raise":
+ raise NotImplementedError(f"{ambiguous=} is not supported")
+ if nonexistent != "raise":
+ raise NotImplementedError(f"{nonexistent=} is not supported")
+ if tz is None:
+ new_type = pa.timestamp(self.dtype.pyarrow_dtype.unit)
+ return type(self)(self._data.cast(new_type))
+ pa_tz = str(tz)
+ return type(self)(
+ self._data.cast(pa.timestamp(self.dtype.pyarrow_dtype.unit, pa_tz))
+ )
diff --git a/pandas/core/arrays/arrow/dtype.py b/pandas/core/arrays/arrow/dtype.py
index 907804ab9e51d..90c86cd6d55ef 100644
--- a/pandas/core/arrays/arrow/dtype.py
+++ b/pandas/core/arrays/arrow/dtype.py
@@ -112,6 +112,9 @@ def numpy_dtype(self) -> np.dtype:
@cache_readonly
def kind(self) -> str:
+ if pa.types.is_timestamp(self.pyarrow_dtype):
+ # To mirror DatetimeTZDtype
+ return "M"
return self.numpy_dtype.kind
@cache_readonly
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 9e4680d2205b9..7525e8131fabf 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -3,7 +3,10 @@
"""
from __future__ import annotations
-from typing import TYPE_CHECKING
+from typing import (
+ TYPE_CHECKING,
+ cast,
+)
import numpy as np
@@ -27,6 +30,8 @@
PeriodArray,
TimedeltaArray,
)
+from pandas.core.arrays.arrow.array import ArrowExtensionArray
+from pandas.core.arrays.arrow.dtype import ArrowDtype
from pandas.core.base import (
NoNewAttributesMixin,
PandasObject,
@@ -139,6 +144,88 @@ def _delegate_method(self, name, *args, **kwargs):
return result
+@delegate_names(
+ delegate=ArrowExtensionArray,
+ accessors=DatetimeArray._datetimelike_ops,
+ typ="property",
+ accessor_mapping=lambda x: f"_dt_{x}",
+ raise_on_missing=False,
+)
+@delegate_names(
+ delegate=ArrowExtensionArray,
+ accessors=DatetimeArray._datetimelike_methods,
+ typ="method",
+ accessor_mapping=lambda x: f"_dt_{x}",
+ raise_on_missing=False,
+)
+class ArrowTemporalProperties(PandasDelegate, PandasObject, NoNewAttributesMixin):
+ def __init__(self, data: Series, orig) -> None:
+ if not isinstance(data, ABCSeries):
+ raise TypeError(
+ f"cannot convert an object of type {type(data)} to a datetimelike index"
+ )
+
+ self._parent = data
+ self._orig = orig
+ self._freeze()
+
+ def _delegate_property_get(self, name: str): # type: ignore[override]
+ if not hasattr(self._parent.array, f"_dt_{name}"):
+ raise NotImplementedError(
+ f"dt.{name} is not supported for {self._parent.dtype}"
+ )
+ result = getattr(self._parent.array, f"_dt_{name}")
+
+ if not is_list_like(result):
+ return result
+
+ if self._orig is not None:
+ index = self._orig.index
+ else:
+ index = self._parent.index
+ # return the result as a Series, which is by definition a copy
+ result = type(self._parent)(
+ result, index=index, name=self._parent.name
+ ).__finalize__(self._parent)
+
+ return result
+
+ def _delegate_method(self, name: str, *args, **kwargs):
+ if not hasattr(self._parent.array, f"_dt_{name}"):
+ raise NotImplementedError(
+ f"dt.{name} is not supported for {self._parent.dtype}"
+ )
+
+ result = getattr(self._parent.array, f"_dt_{name}")(*args, **kwargs)
+
+ if self._orig is not None:
+ index = self._orig.index
+ else:
+ index = self._parent.index
+ # return the result as a Series, which is by definition a copy
+ result = type(self._parent)(
+ result, index=index, name=self._parent.name
+ ).__finalize__(self._parent)
+
+ return result
+
+ def isocalendar(self):
+ from pandas import DataFrame
+
+ result = (
+ cast(ArrowExtensionArray, self._parent.array)
+ ._dt_isocalendar()
+ ._data.combine_chunks()
+ )
+ iso_calendar_df = DataFrame(
+ {
+ col: type(self._parent.array)(result.field(i)) # type: ignore[call-arg]
+ for i, col in enumerate(["year", "week", "day"])
+ }
+ )
+ return iso_calendar_df
+
+
@delegate_names(
delegate=DatetimeArray, accessors=DatetimeArray._datetimelike_ops, typ="property"
)
@@ -472,6 +559,8 @@ def __new__(cls, data: Series):
index=orig.index,
)
+ if isinstance(data.dtype, ArrowDtype) and data.dtype.kind == "M":
+ return ArrowTemporalProperties(data, orig)
if is_datetime64_dtype(data.dtype):
return DatetimeProperties(data, orig)
elif is_datetime64tz_dtype(data.dtype):
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 522a0d59e4161..51edae326417a 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -1599,3 +1599,207 @@ def test_searchsorted_with_na_raises(data_for_sorting, as_series):
)
with pytest.raises(ValueError, match=msg):
arr.searchsorted(b)
+
+
+def test_unsupported_dt(data):
+ pa_dtype = data.dtype.pyarrow_dtype
+ if not pa.types.is_temporal(pa_dtype):
+ with pytest.raises(
+ AttributeError, match="Can only use .dt accessor with datetimelike values"
+ ):
+ pd.Series(data).dt
+
+
+@pytest.mark.parametrize(
+ "prop, expected",
+ [
+ ["day", 2],
+ ["day_of_week", 0],
+ ["dayofweek", 0],
+ ["weekday", 0],
+ ["day_of_year", 2],
+ ["dayofyear", 2],
+ ["hour", 3],
+ ["minute", 4],
+ pytest.param(
+ "is_leap_year",
+ False,
+ marks=pytest.mark.xfail(
+ pa_version_under8p0,
+ raises=NotImplementedError,
+ reason="is_leap_year not implemented for pyarrow < 8.0",
+ ),
+ ),
+ ["microsecond", 5],
+ ["month", 1],
+ ["nanosecond", 6],
+ ["quarter", 1],
+ ["second", 7],
+ ["date", date(2023, 1, 2)],
+ ["time", time(3, 4, 7, 5)],
+ ],
+)
+def test_dt_properties(prop, expected):
+ ser = pd.Series(
+ [
+ pd.Timestamp(
+ year=2023,
+ month=1,
+ day=2,
+ hour=3,
+ minute=4,
+ second=7,
+ microsecond=5,
+ nanosecond=6,
+ ),
+ None,
+ ],
+ dtype=ArrowDtype(pa.timestamp("ns")),
+ )
+ result = getattr(ser.dt, prop)
+ exp_type = None
+ if isinstance(expected, date):
+ exp_type = pa.date64()
+ elif isinstance(expected, time):
+ exp_type = pa.time64("ns")
+ expected = pd.Series(ArrowExtensionArray(pa.array([expected, None], type=exp_type)))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("unit", ["us", "ns"])
+def test_dt_time_preserve_unit(unit):
+ ser = pd.Series(
+ [datetime(year=2023, month=1, day=2, hour=3), None],
+ dtype=ArrowDtype(pa.timestamp(unit)),
+ )
+ result = ser.dt.time
+ expected = pd.Series(
+ ArrowExtensionArray(pa.array([time(3, 0), None], type=pa.time64(unit)))
+ )
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"])
+def test_dt_tz(tz):
+ ser = pd.Series(
+ [datetime(year=2023, month=1, day=2, hour=3), None],
+ dtype=ArrowDtype(pa.timestamp("ns", tz=tz)),
+ )
+ result = ser.dt.tz
+ assert result == tz
+
+
+def test_dt_isocalendar():
+ ser = pd.Series(
+ [datetime(year=2023, month=1, day=2, hour=3), None],
+ dtype=ArrowDtype(pa.timestamp("ns")),
+ )
+ result = ser.dt.isocalendar()
+ expected = pd.DataFrame(
+ [[2023, 1, 1], [0, 0, 0]],
+ columns=["year", "week", "day"],
+ dtype="int64[pyarrow]",
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_dt_strftime(request):
+ if is_platform_windows() and is_ci_environment():
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=pa.ArrowInvalid,
+ reason=(
+ "TODO: Set ARROW_TIMEZONE_DATABASE environment variable "
+ "on CI to path to the tzdata for pyarrow."
+ ),
+ )
+ )
+ ser = pd.Series(
+ [datetime(year=2023, month=1, day=2, hour=3), None],
+ dtype=ArrowDtype(pa.timestamp("ns")),
+ )
+ result = ser.dt.strftime("%Y-%m-%dT%H:%M:%S")
+ expected = pd.Series(
+ ["2023-01-02T03:00:00.000000000", None], dtype=ArrowDtype(pa.string())
+ )
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("method", ["ceil", "floor", "round"])
+def test_dt_roundlike_tz_options_not_supported(method):
+ ser = pd.Series(
+ [datetime(year=2023, month=1, day=2, hour=3), None],
+ dtype=ArrowDtype(pa.timestamp("ns")),
+ )
+ with pytest.raises(NotImplementedError, match="ambiguous is not supported."):
+ getattr(ser.dt, method)("1H", ambiguous="NaT")
+
+ with pytest.raises(NotImplementedError, match="nonexistent is not supported."):
+ getattr(ser.dt, method)("1H", nonexistent="NaT")
+
+
+@pytest.mark.parametrize("method", ["ceil", "floor", "round"])
+def test_dt_roundlike_unsupported_freq(method):
+ ser = pd.Series(
+ [datetime(year=2023, month=1, day=2, hour=3), None],
+ dtype=ArrowDtype(pa.timestamp("ns")),
+ )
+ with pytest.raises(ValueError, match="freq='1B' is not supported"):
+ getattr(ser.dt, method)("1B")
+
+ with pytest.raises(ValueError, match="Must specify a valid frequency: None"):
+ getattr(ser.dt, method)(None)
+
+
+@pytest.mark.xfail(
+ pa_version_under7p0, reason="Methods not supported for pyarrow < 7.0"
+)
+@pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U", "N"])
+@pytest.mark.parametrize("method", ["ceil", "floor", "round"])
+def test_dt_ceil_year_floor(freq, method):
+ ser = pd.Series(
+ [datetime(year=2023, month=1, day=1), None],
+ )
+ pa_dtype = ArrowDtype(pa.timestamp("ns"))
+ expected = getattr(ser.dt, method)(f"1{freq}").astype(pa_dtype)
+ result = getattr(ser.astype(pa_dtype).dt, method)(f"1{freq}")
+ tm.assert_series_equal(result, expected)
+
+
+def test_dt_tz_localize_unsupported_tz_options():
+ ser = pd.Series(
+ [datetime(year=2023, month=1, day=2, hour=3), None],
+ dtype=ArrowDtype(pa.timestamp("ns")),
+ )
+ with pytest.raises(NotImplementedError, match="ambiguous='NaT' is not supported"):
+ ser.dt.tz_localize("UTC", ambiguous="NaT")
+
+ with pytest.raises(NotImplementedError, match="nonexistent='NaT' is not supported"):
+ ser.dt.tz_localize("UTC", nonexistent="NaT")
+
+
+def test_dt_tz_localize_none():
+ ser = pd.Series(
+ [datetime(year=2023, month=1, day=2, hour=3), None],
+ dtype=ArrowDtype(pa.timestamp("ns", tz="US/Pacific")),
+ )
+ result = ser.dt.tz_localize(None)
+ expected = pd.Series(
+ [datetime(year=2023, month=1, day=2, hour=3), None],
+ dtype=ArrowDtype(pa.timestamp("ns")),
+ )
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("unit", ["us", "ns"])
+def test_dt_tz_localize(unit):
+ ser = pd.Series(
+ [datetime(year=2023, month=1, day=2, hour=3), None],
+ dtype=ArrowDtype(pa.timestamp(unit)),
+ )
+ result = ser.dt.tz_localize("US/Pacific")
+ expected = pd.Series(
+ [datetime(year=2023, month=1, day=2, hour=3), None],
+ dtype=ArrowDtype(pa.timestamp(unit, "US/Pacific")),
+ )
+ tm.assert_series_equal(result, expected)
| Enabled & tested for `pa.timestamp` types so far. Will follow up with `pa.duration` types in a subsequent PR
| https://api.github.com/repos/pandas-dev/pandas/pulls/50954 | 2023-01-24T04:30:41Z | 2023-02-08T16:53:07Z | 2023-02-08T16:53:07Z | 2023-02-08T16:53:11Z |
DOC: Fix rolling.rank see also section | diff --git a/pandas/core/window/doc.py b/pandas/core/window/doc.py
index 6e188531a0502..b1ff53e9d1a44 100644
--- a/pandas/core/window/doc.py
+++ b/pandas/core/window/doc.py
@@ -24,10 +24,10 @@ def create_section_header(header: str) -> str:
template_see_also = dedent(
"""
- Series.{window_method} : Calling {window_method} with Series data.
- DataFrame.{window_method} : Calling {window_method} with DataFrames.
- Series.{agg_method} : Aggregating {agg_method} for Series.
- DataFrame.{agg_method} : Aggregating {agg_method} for DataFrame.\n
+ pandas.Series.{window_method} : Calling {window_method} with Series data.
+ pandas.DataFrame.{window_method} : Calling {window_method} with DataFrames.
+ pandas.Series.{agg_method} : Aggregating {agg_method} for Series.
+ pandas.DataFrame.{agg_method} : Aggregating {agg_method} for DataFrame.\n
"""
).replace("\n", "", 1)
| - [x] closes #50929 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50952 | 2023-01-24T02:16:26Z | 2023-01-24T13:35:04Z | 2023-01-24T13:35:04Z | 2023-01-24T13:56:03Z |
ENH: Add arrow tests for get_dummies | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index eb25566e7983e..045254d2041fc 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -42,6 +42,7 @@
import pandas as pd
from pandas import (
+ ArrowDtype,
Categorical,
CategoricalIndex,
DataFrame,
@@ -198,10 +199,16 @@
UNSIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
SIGNED_INT_PYARROW_DTYPES = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
ALL_INT_PYARROW_DTYPES = UNSIGNED_INT_PYARROW_DTYPES + SIGNED_INT_PYARROW_DTYPES
+ ALL_INT_PYARROW_DTYPES_STR_REPR = [
+ str(ArrowDtype(typ)) for typ in ALL_INT_PYARROW_DTYPES
+ ]
# pa.float16 doesn't seem supported
# https://github.com/apache/arrow/blob/master/python/pyarrow/src/arrow/python/helpers.cc#L86
FLOAT_PYARROW_DTYPES = [pa.float32(), pa.float64()]
+ FLOAT_PYARROW_DTYPES_STR_REPR = [
+ str(ArrowDtype(typ)) for typ in FLOAT_PYARROW_DTYPES
+ ]
STRING_PYARROW_DTYPES = [pa.string()]
BINARY_PYARROW_DTYPES = [pa.binary()]
@@ -234,6 +241,9 @@
+ TIMEDELTA_PYARROW_DTYPES
+ BOOL_PYARROW_DTYPES
)
+else:
+ FLOAT_PYARROW_DTYPES_STR_REPR = []
+ ALL_INT_PYARROW_DTYPES_STR_REPR = []
EMPTY_STRING_PATTERN = re.compile("^$")
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 2e9638036eec5..b49dfeb92e2af 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1527,6 +1527,43 @@ def any_numeric_ea_dtype(request):
return request.param
+# Unsupported operand types for + ("List[Union[str, ExtensionDtype, dtype[Any],
+# Type[object]]]" and "List[str]")
+@pytest.fixture(
+ params=tm.ALL_INT_EA_DTYPES
+ + tm.FLOAT_EA_DTYPES
+ + tm.ALL_INT_PYARROW_DTYPES_STR_REPR
+ + tm.FLOAT_PYARROW_DTYPES_STR_REPR # type: ignore[operator]
+)
+def any_numeric_ea_and_arrow_dtype(request):
+ """
+ Parameterized fixture for any nullable integer dtype and
+ any float ea dtypes.
+
+ * 'UInt8'
+ * 'Int8'
+ * 'UInt16'
+ * 'Int16'
+ * 'UInt32'
+ * 'Int32'
+ * 'UInt64'
+ * 'Int64'
+ * 'Float32'
+ * 'Float64'
+ * 'uint8[pyarrow]'
+ * 'int8[pyarrow]'
+ * 'uint16[pyarrow]'
+ * 'int16[pyarrow]'
+ * 'uint32[pyarrow]'
+ * 'int32[pyarrow]'
+ * 'uint64[pyarrow]'
+ * 'int64[pyarrow]'
+ * 'float32[pyarrow]'
+ * 'float64[pyarrow]'
+ """
+ return request.param
+
+
@pytest.fixture(params=tm.SIGNED_INT_EA_DTYPES)
def any_signed_int_ea_dtype(request):
"""
diff --git a/pandas/tests/reshape/test_get_dummies.py b/pandas/tests/reshape/test_get_dummies.py
index ed4da9562aeee..daac5a0c9dac2 100644
--- a/pandas/tests/reshape/test_get_dummies.py
+++ b/pandas/tests/reshape/test_get_dummies.py
@@ -658,22 +658,22 @@ def test_get_dummies_with_string_values(self, values):
with pytest.raises(TypeError, match=msg):
get_dummies(df, columns=values)
- def test_get_dummies_ea_dtype_series(self, any_numeric_ea_dtype):
+ def test_get_dummies_ea_dtype_series(self, any_numeric_ea_and_arrow_dtype):
# GH#32430
ser = Series(list("abca"))
- result = get_dummies(ser, dtype=any_numeric_ea_dtype)
+ result = get_dummies(ser, dtype=any_numeric_ea_and_arrow_dtype)
expected = DataFrame(
{"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], "c": [0, 0, 1, 0]},
- dtype=any_numeric_ea_dtype,
+ dtype=any_numeric_ea_and_arrow_dtype,
)
tm.assert_frame_equal(result, expected)
- def test_get_dummies_ea_dtype_dataframe(self, any_numeric_ea_dtype):
+ def test_get_dummies_ea_dtype_dataframe(self, any_numeric_ea_and_arrow_dtype):
# GH#32430
df = DataFrame({"x": list("abca")})
- result = get_dummies(df, dtype=any_numeric_ea_dtype)
+ result = get_dummies(df, dtype=any_numeric_ea_and_arrow_dtype)
expected = DataFrame(
{"x_a": [1, 0, 0, 1], "x_b": [0, 1, 0, 0], "x_c": [0, 0, 1, 0]},
- dtype=any_numeric_ea_dtype,
+ dtype=any_numeric_ea_and_arrow_dtype,
)
tm.assert_frame_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
cc @mroeschke
did not want to clutter the other pr with this. Is there an easier way of defining the variables? | https://api.github.com/repos/pandas-dev/pandas/pulls/50951 | 2023-01-24T02:12:36Z | 2023-01-26T18:09:32Z | 2023-01-26T18:09:32Z | 2023-01-27T00:38:30Z |
DEPR: parsing to tzlocal | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index dc05745c8c0e5..657f1d1fb84e6 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -611,6 +611,7 @@ Other API changes
Deprecations
~~~~~~~~~~~~
+- Deprecated parsing datetime strings with system-local timezone to ``tzlocal``, pass a ``tz`` keyword or explicitly call ``tz_localize`` instead (:issue:`50791`)
- Deprecated argument ``infer_datetime_format`` in :func:`to_datetime` and :func:`read_csv`, as a strict version of it is now the default (:issue:`48621`)
- Deprecated :func:`pandas.io.sql.execute` (:issue:`50185`)
- :meth:`Index.is_boolean` has been deprecated. Use :func:`pandas.api.types.is_bool_dtype` instead (:issue:`50042`)
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index aa53f8d813874..485813a634f20 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -653,6 +653,18 @@ cdef dateutil_parse(
ret = ret + relativedelta.relativedelta(weekday=res.weekday)
if not ignoretz:
if res.tzname and res.tzname in time.tzname:
+ # GH#50791
+ if res.tzname != "UTC":
+ # If the system is localized in UTC (as many CI runs are)
+ # we get tzlocal, once the deprecation is enforced will get
+ # timezone.utc, not raise.
+ warnings.warn(
+ "Parsing '{res.tzname}' as tzlocal (dependent on system timezone) "
+ "is deprecated and will raise in a future version. Pass the 'tz' "
+ "keyword or call tz_localize after construction instead",
+ FutureWarning,
+ stacklevel=find_stack_level()
+ )
ret = ret.replace(tzinfo=_dateutil_tzlocal())
elif res.tzoffset == 0:
ret = ret.replace(tzinfo=_dateutil_tzutc())
diff --git a/pandas/tests/tslibs/test_array_to_datetime.py b/pandas/tests/tslibs/test_array_to_datetime.py
index 63adb8427969d..ba188c3182f57 100644
--- a/pandas/tests/tslibs/test_array_to_datetime.py
+++ b/pandas/tests/tslibs/test_array_to_datetime.py
@@ -71,7 +71,9 @@ def test_parsing_non_iso_timezone_offset():
dt_string = "01-01-2013T00:00:00.000000000+0000"
arr = np.array([dt_string], dtype=object)
- result, result_tz = tslib.array_to_datetime(arr)
+ with tm.assert_produces_warning(None):
+ # GH#50949 should not get tzlocal-deprecation warning here
+ result, result_tz = tslib.array_to_datetime(arr)
expected = np.array([np.datetime64("2013-01-01 00:00:00.000000000")])
tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py
index 33fce7b351513..b0f7f755518a4 100644
--- a/pandas/tests/tslibs/test_parsing.py
+++ b/pandas/tests/tslibs/test_parsing.py
@@ -5,6 +5,7 @@
import re
from dateutil.parser import parse as du_parse
+from dateutil.tz import tzlocal
import numpy as np
import pytest
@@ -18,6 +19,23 @@
import pandas._testing as tm
+@td.skip_if_windows
+def test_parsing_tzlocal_deprecated():
+ # GH#50791
+ msg = "Pass the 'tz' keyword or call tz_localize after construction instead"
+ dtstr = "Jan 15 2004 03:00 EST"
+
+ with tm.set_timezone("US/Eastern"):
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ res, _ = parse_datetime_string_with_reso(dtstr)
+
+ assert isinstance(res.tzinfo, tzlocal)
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ res = parsing.parse_datetime_string(dtstr)
+ assert isinstance(res.tzinfo, tzlocal)
+
+
def test_parse_datetime_string_with_reso():
(parsed, reso) = parse_datetime_string_with_reso("4Q1984")
(parsed_lower, reso_lower) = parse_datetime_string_with_reso("4q1984")
| - [x] closes #50791 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50949 | 2023-01-24T00:05:04Z | 2023-02-01T19:13:23Z | 2023-02-01T19:13:23Z | 2023-02-01T20:39:45Z |
Handle CoW in BlockManager.apply | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 998c57b66509d..15213b7e82516 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6142,7 +6142,7 @@ def dtypes(self):
return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_)
def astype(
- self: NDFrameT, dtype, copy: bool_t = True, errors: IgnoreRaise = "raise"
+ self: NDFrameT, dtype, copy: bool_t = None, errors: IgnoreRaise = "raise"
) -> NDFrameT:
"""
Cast a pandas object to a specified dtype ``dtype``.
@@ -6260,6 +6260,11 @@ def astype(
2 2020-01-03
dtype: datetime64[ns]
"""
+ if copy is None:
+ if using_copy_on_write():
+ copy = False
+ else:
+ copy = True
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
@@ -6459,7 +6464,7 @@ def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT:
return self.copy(deep=True)
@final
- def infer_objects(self: NDFrameT, copy: bool_t = True) -> NDFrameT:
+ def infer_objects(self: NDFrameT, copy: bool_t = None) -> NDFrameT:
"""
Attempt to infer better dtypes for object columns.
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 00ab9d02cee00..c52d380495bea 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -14,6 +14,8 @@
import numpy as np
+from pandas._config import using_copy_on_write
+
from pandas._libs import (
Timestamp,
internals as libinternals,
@@ -414,7 +416,7 @@ def coerce_to_target_dtype(self, other) -> Block:
"""
new_dtype = find_result_type(self.values, other)
- return self.astype(new_dtype, copy=False)
+ return self.astype(new_dtype, copy=False)[0]
@final
def _maybe_downcast(self, blocks: list[Block], downcast=None) -> list[Block]:
@@ -449,13 +451,20 @@ def convert(
self,
*,
copy: bool = True,
+ return_is_view=False,
) -> list[Block]:
"""
attempt to coerce any object types to better types return a copy
of the block (if copy = True) by definition we are not an ObjectBlock
here!
"""
- return [self.copy()] if copy else [self]
+ if using_copy_on_write():
+ result = self.copy(deep=copy)
+ is_view = True
+ else:
+ result = self.copy() if copy else self
+ is_view = False
+ return [(result, is_view)] if return_is_view else [result]
# ---------------------------------------------------------------------
# Array-Like Methods
@@ -496,7 +505,22 @@ def astype(
f"({self.dtype.name} [{self.shape}]) to different shape "
f"({newb.dtype.name} [{newb.shape}])"
)
- return newb
+
+ is_view = False
+ if using_copy_on_write():
+ if not copy:
+ if (
+ isinstance(values.dtype, np.dtype)
+ and isinstance(new_values.dtype, np.dtype)
+ and values is not new_values
+ ):
+ # We certainly made a copy
+ pass
+ else:
+ # We maybe didn't make a copy
+ is_view = True
+
+ return newb, is_view
@final
def to_native_types(self, na_rep: str = "nan", quoting=None, **kwargs) -> Block:
@@ -571,7 +595,7 @@ def replace(
elif self.ndim == 1 or self.shape[0] == 1:
if value is None or value is NA:
- blk = self.astype(np.dtype(object))
+ blk = self.astype(np.dtype(object))[0]
else:
blk = self.coerce_to_target_dtype(value)
return blk.replace(
@@ -753,7 +777,7 @@ def _replace_coerce(
if value is None:
# gh-45601, gh-45836, gh-46634
if mask.any():
- nb = self.astype(np.dtype(object), copy=False)
+ nb = self.astype(np.dtype(object), copy=False)[0]
if nb is self and not inplace:
nb = nb.copy()
putmask_inplace(nb.values, mask, value)
@@ -1995,6 +2019,7 @@ def convert(
self,
*,
copy: bool = True,
+ return_is_view=False,
) -> list[Block]:
"""
attempt to cast any object types to better types return a copy of
@@ -2003,7 +2028,10 @@ def convert(
if self.dtype != _dtype_obj:
# GH#50067 this should be impossible in ObjectBlock, but until
# that is fixed, we short-circuit here.
- return [self]
+ if using_copy_on_write():
+ result = self.copy(deep=False)
+ return [(result, True)] if return_is_view else [result]
+ return [(self, False)] if return_is_view else [self]
values = self.values
if values.ndim == 2:
@@ -2018,10 +2046,16 @@ def convert(
convert_period=True,
convert_interval=True,
)
+ is_view = False
if copy and res_values is values:
res_values = values.copy()
+ elif res_values is values and using_copy_on_write():
+ is_view = True
res_values = ensure_block_shape(res_values, self.ndim)
- return [self.make_block(res_values)]
+ if return_is_view:
+ return [(self.make_block(res_values), is_view)]
+ else:
+ return [self.make_block(res_values)]
# -----------------------------------------------------------------
@@ -2185,16 +2219,16 @@ def extract_pandas_array(
# -----------------------------------------------------------------
-def extend_blocks(result, blocks=None) -> list[Block]:
+def extend_blocks(result, blocks=None, i=0) -> list[Block]:
"""return a new extended blocks, given the result"""
if blocks is None:
blocks = []
if isinstance(result, list):
for r in result:
- if isinstance(r, list):
- blocks.extend(r)
- else:
- blocks.append(r)
+ extend_blocks(r, blocks)
+ elif isinstance(result, tuple):
+ assert isinstance(result[0], Block), type(result[0])
+ blocks.append((result[0], result[1], i))
else:
assert isinstance(result, Block), type(result)
blocks.append(result)
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 1a0aba0778da5..d2defd2524535 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -302,6 +302,7 @@ def apply(
self: T,
f,
align_keys: list[str] | None = None,
+ check_cow: bool = False,
**kwargs,
) -> T:
"""
@@ -327,7 +328,7 @@ def apply(
aligned_args = {k: kwargs[k] for k in align_keys}
- for b in self.blocks:
+ for i, b in enumerate(self.blocks):
if aligned_args:
@@ -347,9 +348,24 @@ def apply(
applied = b.apply(f, **kwargs)
else:
applied = getattr(b, f)(**kwargs)
- result_blocks = extend_blocks(applied, result_blocks)
+ result_blocks = extend_blocks(applied, result_blocks, i)
+
+ refs = None
+ parent = None
+ if check_cow:
+ result_blocks, result_is_view, result_index = list(zip(*result_blocks))
+ if using_copy_on_write():
+ refs = []
+ for b, is_view, i in zip(result_blocks, result_is_view, result_index):
+ if is_view:
+ refs.append(weakref.ref(self.blocks[i]))
+ else:
+ refs.append(None)
- out = type(self).from_blocks(result_blocks, self.axes)
+ if com.any_not_none(refs):
+ parent = self
+
+ out = type(self).from_blocks(result_blocks, self.axes, refs, parent)
return out
def where(self: T, other, cond, align: bool) -> T:
@@ -436,12 +452,28 @@ def fillna(self: T, value, limit, inplace: bool, downcast) -> T:
)
def astype(self: T, dtype, copy: bool = False, errors: str = "raise") -> T:
- return self.apply("astype", dtype=dtype, copy=copy, errors=errors)
+ if copy is None:
+ if using_copy_on_write():
+ copy = False
+ else:
+ copy = True
+
+ return self.apply(
+ "astype", dtype=dtype, copy=copy, errors=errors, check_cow=True
+ )
def convert(self: T, copy: bool) -> T:
+ if copy is None:
+ if using_copy_on_write():
+ copy = False
+ else:
+ copy = True
+
return self.apply(
"convert",
copy=copy,
+ check_cow=True,
+ return_is_view=True,
)
def replace(self: T, to_replace, value, inplace: bool) -> T:
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index 0cec5522e39cd..782d6e991fe1e 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -1137,3 +1137,96 @@ def test_isetitem(using_copy_on_write):
assert np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
else:
assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
+
+
+def test_astype_single_dtype(using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": 1.5})
+ df_orig = df.copy()
+ df2 = df.astype("float64")
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ else:
+ assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+
+ # mutating df2 triggers a copy-on-write for that column/block
+ df2.iloc[0, 2] = 5.5
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_astype_dict_dtypes(using_copy_on_write):
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": Series([1.5, 1.5, 1.5], dtype="float64")}
+ )
+ df_orig = df.copy()
+ df2 = df.astype({"a": "float64", "c": "float64"})
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+ assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ else:
+ assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+ assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+
+ # mutating df2 triggers a copy-on-write for that column/block
+ df2.iloc[0, 2] = 5.5
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+
+ df2.iloc[0, 1] = 10
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_infer_objects(using_copy_on_write):
+ df = DataFrame({"a": [1, 2], "b": "c", "c": 1, "d": "x"})
+ df_orig = df.copy()
+ df2 = df.infer_objects()
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+
+ else:
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+
+ df2.iloc[0, 0] = 0
+ df2.iloc[0, 1] = "d"
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_infer_objects2(using_copy_on_write):
+ df = DataFrame({"a": [1, 2], "b": "x", "c": np.array([1, 2], dtype=object)})
+ df_orig = df.copy()
+ df2 = df.infer_objects()
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+
+ else:
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+
+ df2.iloc[0, 0] = 0
+ df2.iloc[0, 1] = "d"
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+ tm.assert_frame_equal(df, df_orig)
+
+ # mutate parent
+ df2 = df.infer_objects()
+ df.iloc[0, 0] = 0
+ assert df2.iloc[0, 0] == 1
| @phofl seeing that we need the Block methods be aware of CoW (does it make a copy or not? do we need to keep track of a ref or not?) in several cases, I was thinking of this alternative of how to handle this generally. I don't know if it will turn out to be cleaner if has to handle all different cases, but for now I tested it with astype and infer_objects (https://github.com/pandas-dev/pandas/pull/50802, https://github.com/pandas-dev/pandas/pull/50428).
The idea is that the Block method just tells the manager whether the block that is returned is a view or a copy, and then all the weakref management is centralized in the `BlockManager.apply()` method.
It avoids the "hack" of attaching the `_ref` to the block. Now I am writing this, of course also the in case of using the `_ref` method, the extracting of those refs could be centralized in `apply()` instead of done in both `astype` and `convert`, as it is the case right now in the linked PRs.
I _think_ I find the logic a bit simpler here in this PR, especially for `convert` with its block-splitting logic, but it's also not a big difference (and I am of course biased since I wrote this one, and the logic in your PRs I only read ;))
Note, I just copied some minimal pieces from your PRs to test this, and didn't bother with typing and cleaning it up etc. _If_ we think this might be a suitable path forward, I would also remove those parts here again, and only add the general infrastructure here, and keep astype/convert for its separate PRs. | https://api.github.com/repos/pandas-dev/pandas/pulls/50948 | 2023-01-23T23:58:33Z | 2023-02-08T16:06:30Z | null | 2023-02-08T16:06:36Z |
DEPR: dt64 any/all GH#34479 | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 613a1e82d461f..cd384857153c0 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -638,6 +638,7 @@ Deprecations
- :meth:`Index.holds_integer` has been deprecated. Use :func:`pandas.api.types.infer_dtype` instead (:issue:`50243`)
- :meth:`Index.is_categorical` has been deprecated. Use :func:`pandas.api.types.is_categorical_dtype` instead (:issue:`50042`)
- :meth:`Index.is_interval` has been deprecated. Use :func:`pandas.api.types.is_intterval_dtype` instead (:issue:`50042`)
+- Deprecated ``all`` and ``any`` reductions with ``datetime64`` and :class:`DatetimeTZDtype` dtypes, use e.g. ``(obj != pd.Timestamp(0), tz=obj.tz).all()`` instead (:issue:`34479`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 816d8abec1428..4b26528e6661c 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -2034,11 +2034,12 @@ def ceil(
# Reductions
def any(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool:
- # GH#34479 discussion of desired behavior long-term
+ # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype
return nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())
def all(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool:
- # GH#34479 discussion of desired behavior long-term
+ # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype
+
return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())
# --------------------------------------------------------------
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index d61668e372e0b..0af851669820e 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -32,6 +32,7 @@
npt,
)
from pandas.compat._optional import import_optional_dependency
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_any_int_dtype,
@@ -529,6 +530,15 @@ def nanany(
>>> nanops.nanany(s)
False
"""
+ if needs_i8_conversion(values.dtype) and values.dtype.kind != "m":
+ # GH#34479
+ warnings.warn(
+ "'any' with datetime64 dtypes is deprecated and will raise in a "
+ "future version. Use (obj != pd.Timestamp(0)).any() instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
values, _, _, _, _ = _get_values(values, skipna, fill_value=False, mask=mask)
# For object type, any won't necessarily return
@@ -575,6 +585,15 @@ def nanall(
>>> nanops.nanall(s)
False
"""
+ if needs_i8_conversion(values.dtype) and values.dtype.kind != "m":
+ # GH#34479
+ warnings.warn(
+ "'all' with datetime64 dtypes is deprecated and will raise in a "
+ "future version. Use (obj != pd.Timestamp(0)).all() instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
values, _, _, _, _ = _get_values(values, skipna, fill_value=True, mask=mask)
# For object type, all won't necessarily return
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 2d395a7cbd608..3e6074971352d 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -1138,6 +1138,10 @@ def test_any_all_object_dtype(self, axis, bool_agg_func, skipna):
expected = Series([True, True, True, True])
tm.assert_series_equal(result, expected)
+ # GH#50947 deprecates this but it is not emitting a warning in some builds.
+ @pytest.mark.filterwarnings(
+ "ignore:'any' with datetime64 dtypes is deprecated.*:FutureWarning"
+ )
def test_any_datetime(self):
# GH 23070
@@ -1151,6 +1155,7 @@ def test_any_datetime(self):
df = DataFrame({"A": float_data, "B": datetime_data})
result = df.any(axis=1)
+
expected = Series([True, True, True, False])
tm.assert_series_equal(result, expected)
@@ -1245,12 +1250,22 @@ def test_any_all_np_func(self, func, data, expected):
):
getattr(DataFrame(data), func.__name__)(axis=None)
else:
- result = func(data)
+ msg = "'(any|all)' with datetime64 dtypes is deprecated"
+ if data.dtypes.apply(lambda x: x.kind == "M").any():
+ warn = FutureWarning
+ else:
+ warn = None
+
+ with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
+ # GH#34479
+ result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
- result = getattr(DataFrame(data), func.__name__)(axis=None)
+ with tm.assert_produces_warning(warn, match=msg):
+ # GH#34479
+ result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 46cbbbd3e6480..e0ae3da482b35 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -985,27 +985,32 @@ def test_any_all_datetimelike(self):
ser = Series(dta)
df = DataFrame(ser)
- assert dta.all()
- assert dta.any()
+ msg = "'(any|all)' with datetime64 dtypes is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ # GH#34479
+ assert dta.all()
+ assert dta.any()
- assert ser.all()
- assert ser.any()
+ assert ser.all()
+ assert ser.any()
- assert df.any().all()
- assert df.all().all()
+ assert df.any().all()
+ assert df.all().all()
dta = dta.tz_localize("UTC")
ser = Series(dta)
df = DataFrame(ser)
- assert dta.all()
- assert dta.any()
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ # GH#34479
+ assert dta.all()
+ assert dta.any()
- assert ser.all()
- assert ser.any()
+ assert ser.all()
+ assert ser.any()
- assert df.any().all()
- assert df.all().all()
+ assert df.any().all()
+ assert df.all().all()
tda = dta - dta[0]
ser = Series(tda)
| - [x] closes #34479 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50947 | 2023-01-23T23:40:40Z | 2023-01-30T17:55:07Z | 2023-01-30T17:55:07Z | 2023-01-30T18:37:26Z |
Backport PR #50941 on branch 1.5.x (CI set LD_PRELOAD in circleci job) | diff --git a/.circleci/config.yml b/.circleci/config.yml
index 6133037bf3b7d..e7beb78cf6e6d 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -14,7 +14,10 @@ jobs:
steps:
- checkout
- run: .circleci/setup_env.sh
- - run: PATH=$HOME/miniconda3/envs/pandas-dev/bin:$HOME/miniconda3/condabin:$PATH ci/run_tests.sh
+ - run: >
+ PATH=$HOME/miniconda3/envs/pandas-dev/bin:$HOME/miniconda3/condabin:$PATH
+ LD_PRELOAD=$HOME/miniconda3/envs/pandas-dev/lib/libgomp.so.1:$LD_PRELOAD
+ ci/run_tests.sh
workflows:
test:
| Backport PR #50941: CI set LD_PRELOAD in circleci job | https://api.github.com/repos/pandas-dev/pandas/pulls/50943 | 2023-01-23T17:57:15Z | 2023-01-28T20:06:15Z | 2023-01-28T20:06:15Z | 2023-01-28T20:06:16Z |
CI set LD_PRELOAD in circleci job | diff --git a/.circleci/config.yml b/.circleci/config.yml
index 6133037bf3b7d..e7beb78cf6e6d 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -14,7 +14,10 @@ jobs:
steps:
- checkout
- run: .circleci/setup_env.sh
- - run: PATH=$HOME/miniconda3/envs/pandas-dev/bin:$HOME/miniconda3/condabin:$PATH ci/run_tests.sh
+ - run: >
+ PATH=$HOME/miniconda3/envs/pandas-dev/bin:$HOME/miniconda3/condabin:$PATH
+ LD_PRELOAD=$HOME/miniconda3/envs/pandas-dev/lib/libgomp.so.1:$LD_PRELOAD
+ ci/run_tests.sh
workflows:
test:
| circle CI job is failing, let's see if this fixes anything
```python-traceback
=================================== FAILURES ===================================
______________________________ test_scikit_learn _______________________________
[gw2] linux -- Python 3.8.15 /home/circleci/miniconda3/envs/pandas-dev/bin/python3.8
def test_scikit_learn():
> sklearn = import_module("sklearn") # noqa:F841
pandas/tests/test_downstream.py:158:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
pandas/tests/test_downstream.py:27: in import_module
return importlib.import_module(name)
../miniconda3/envs/pandas-dev/lib/python3.8/importlib/__init__.py:127: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
<frozen importlib._bootstrap>:1014: in _gcd_import
???
<frozen importlib._bootstrap>:991: in _find_and_load
???
<frozen importlib._bootstrap>:975: in _find_and_load_unlocked
???
<frozen importlib._bootstrap>:671: in _load_unlocked
???
<frozen importlib._bootstrap_external>:843: in exec_module
???
<frozen importlib._bootstrap>:219: in _call_with_frames_removed
???
../miniconda3/envs/pandas-dev/lib/python3.8/site-packages/sklearn/__init__.py:83: in <module>
from .utils._show_versions import show_versions
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
"""
Utility methods to print system info for debugging
adapted from :func:`pandas.show_versions`
"""
# License: BSD 3 clause
import platform
import sys
from ..utils.fixes import threadpool_info
from .. import __version__
> from ._openmp_helpers import _openmp_parallelism_enabled
E ImportError: /home/circleci/miniconda3/envs/pandas-dev/lib/python3.8/site-packages/sklearn/utils/../../../../libgomp.so.1: cannot allocate memory in static TLS block
../miniconda3/envs/pandas-dev/lib/python3.8/site-packages/sklearn/utils/_show_versions.py:14: ImportError
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/50941 | 2023-01-23T14:54:11Z | 2023-01-23T17:56:34Z | 2023-01-23T17:56:34Z | 2023-01-23T17:56:44Z |
DEPR: move NumericIndex._engine_type and .inferred_type to Index | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2948bb81d0b6a..e9380f815e2c1 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -386,11 +386,26 @@ def _outer_indexer(
_attributes: list[str] = ["name"]
_can_hold_strings: bool = True
+ _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = {
+ np.dtype(np.int8): libindex.Int8Engine,
+ np.dtype(np.int16): libindex.Int16Engine,
+ np.dtype(np.int32): libindex.Int32Engine,
+ np.dtype(np.int64): libindex.Int64Engine,
+ np.dtype(np.uint8): libindex.UInt8Engine,
+ np.dtype(np.uint16): libindex.UInt16Engine,
+ np.dtype(np.uint32): libindex.UInt32Engine,
+ np.dtype(np.uint64): libindex.UInt64Engine,
+ np.dtype(np.float32): libindex.Float32Engine,
+ np.dtype(np.float64): libindex.Float64Engine,
+ np.dtype(np.complex64): libindex.Complex64Engine,
+ np.dtype(np.complex128): libindex.Complex128Engine,
+ }
+
@property
def _engine_type(
self,
) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]:
- return libindex.ObjectEngine
+ return self._engine_types.get(self.dtype, libindex.ObjectEngine)
# whether we support partial string indexing. Overridden
# in DatetimeIndex and PeriodIndex
@@ -2545,6 +2560,13 @@ def inferred_type(self) -> str_t:
"""
Return a string of the type inferred from the values.
"""
+ if isinstance(self.dtype, np.dtype) and self.dtype.kind in "iufc": # fastpath
+ return {
+ "i": "integer",
+ "u": "integer",
+ "f": "floating",
+ "c": "complex",
+ }[self.dtype.kind]
return lib.infer_dtype(self._values, skipna=False)
@cache_readonly
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 68bdd1893c77f..6834fdcdf1f99 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -4,7 +4,6 @@
import numpy as np
-from pandas._libs import index as libindex
from pandas._typing import Dtype
from pandas.util._decorators import (
cache_readonly,
@@ -74,36 +73,6 @@ class NumericIndex(Index):
)
_can_hold_strings = False
- _engine_types: dict[np.dtype, type[libindex.IndexEngine]] = {
- np.dtype(np.int8): libindex.Int8Engine,
- np.dtype(np.int16): libindex.Int16Engine,
- np.dtype(np.int32): libindex.Int32Engine,
- np.dtype(np.int64): libindex.Int64Engine,
- np.dtype(np.uint8): libindex.UInt8Engine,
- np.dtype(np.uint16): libindex.UInt16Engine,
- np.dtype(np.uint32): libindex.UInt32Engine,
- np.dtype(np.uint64): libindex.UInt64Engine,
- np.dtype(np.float32): libindex.Float32Engine,
- np.dtype(np.float64): libindex.Float64Engine,
- np.dtype(np.complex64): libindex.Complex64Engine,
- np.dtype(np.complex128): libindex.Complex128Engine,
- }
-
- @property
- def _engine_type(self) -> type[libindex.IndexEngine]:
- # error: Invalid index type "Union[dtype[Any], ExtensionDtype]" for
- # "Dict[dtype[Any], Type[IndexEngine]]"; expected type "dtype[Any]"
- return self._engine_types[self.dtype] # type: ignore[index]
-
- @cache_readonly
- def inferred_type(self) -> str:
- return {
- "i": "integer",
- "u": "integer",
- "f": "floating",
- "c": "complex",
- }[self.dtype.kind]
-
def __new__(
cls, data=None, dtype: Dtype | None = None, copy: bool = False, name=None
) -> NumericIndex:
| Moves`_engine_type` & `inferred_type` from `NumericIndex`to `Index` in preparation to remove `NumericIndex` and include numpy int/uint/float64 in the base `Index`.
xref #42717. | https://api.github.com/repos/pandas-dev/pandas/pulls/50940 | 2023-01-23T14:00:09Z | 2023-01-25T17:01:27Z | 2023-01-25T17:01:27Z | 2023-01-25T17:10:34Z |
Datetime parsing (PDEP-4): allow mixture of ISO formatted strings | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index ec082cb90e75c..3b06fa1b5517a 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -1001,14 +1001,23 @@ way to parse dates is to explicitly set ``format=``.
)
df
-In the case that you have mixed datetime formats within the same column, you'll need to
-first read it in as an object dtype and then apply :func:`to_datetime` to each element.
+In the case that you have mixed datetime formats within the same column, you can
+pass ``format='mixed'``
.. ipython:: python
data = io.StringIO("date\n12 Jan 2000\n2000-01-13\n")
df = pd.read_csv(data)
- df['date'] = df['date'].apply(pd.to_datetime)
+ df['date'] = pd.to_datetime(df['date'], format='mixed')
+ df
+
+or, if your datetime formats are all ISO8601 (possibly not identically-formatted):
+
+.. ipython:: python
+
+ data = io.StringIO("date\n2020-01-01\n2020-01-01 03:00\n")
+ df = pd.read_csv(data)
+ df['date'] = pd.to_datetime(df['date'], format='ISO8601')
df
.. ipython:: python
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index d1b965e64e43b..d009225f06018 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -311,6 +311,8 @@ Other enhancements
- Added :meth:`DatetimeIndex.as_unit` and :meth:`TimedeltaIndex.as_unit` to convert to different resolutions; supported resolutions are "s", "ms", "us", and "ns" (:issue:`50616`)
- Added :meth:`Series.dt.unit` and :meth:`Series.dt.as_unit` to convert to different resolutions; supported resolutions are "s", "ms", "us", and "ns" (:issue:`51223`)
- Added new argument ``dtype`` to :func:`read_sql` to be consistent with :func:`read_sql_query` (:issue:`50797`)
+- :func:`to_datetime` now accepts ``"ISO8601"`` as an argument to ``format``, which will match any ISO8601 string (but possibly not identically-formatted) (:issue:`50411`)
+- :func:`to_datetime` now accepts ``"mixed"`` as an argument to ``format``, which will infer the format for each element individually (:issue:`50972`)
- Added new argument ``engine`` to :func:`read_json` to support parsing JSON with pyarrow by specifying ``engine="pyarrow"`` (:issue:`48893`)
- Added support for SQLAlchemy 2.0 (:issue:`40686`)
- :class:`Index` set operations :meth:`Index.union`, :meth:`Index.intersection`, :meth:`Index.difference`, and :meth:`Index.symmetric_difference` now support ``sort=True``, which will always return a sorted result, unlike the default ``sort=None`` which does not sort in some cases (:issue:`25151`)
@@ -738,11 +740,16 @@ In the past, :func:`to_datetime` guessed the format for each element independent
Note that this affects :func:`read_csv` as well.
-If you still need to parse dates with inconsistent formats, you'll need to apply :func:`to_datetime`
-to each element individually, e.g. ::
+If you still need to parse dates with inconsistent formats, you can use
+``format='mixed`` (possibly alongside ``dayfirst``) ::
ser = pd.Series(['13-01-2000', '12 January 2000'])
- ser.apply(pd.to_datetime)
+ pd.to_datetime(ser, format='mixed', dayfirst=True)
+
+or, if your formats are all ISO8601 (but possibly not identically-formatted) ::
+
+ ser = pd.Series(['2020-01-01', '2020-01-01 03:00'])
+ pd.to_datetime(ser, format='ISO8601')
.. _whatsnew_200.api_breaking.other:
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index bb06c65597987..cf847746f16cd 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -186,6 +186,7 @@ def array_strptime(
bint iso_format = format_is_iso(fmt)
NPY_DATETIMEUNIT out_bestunit
int out_local = 0, out_tzoffset = 0
+ bint string_to_dts_succeeded = 0
assert is_raise or is_ignore or is_coerce
@@ -306,44 +307,54 @@ def array_strptime(
else:
val = str(val)
- if iso_format:
- string_to_dts_failed = string_to_dts(
+ if fmt == "ISO8601":
+ string_to_dts_succeeded = not string_to_dts(
+ val, &dts, &out_bestunit, &out_local,
+ &out_tzoffset, False, None, False
+ )
+ elif iso_format:
+ string_to_dts_succeeded = not string_to_dts(
val, &dts, &out_bestunit, &out_local,
&out_tzoffset, False, fmt, exact
)
- if not string_to_dts_failed:
- # No error reported by string_to_dts, pick back up
- # where we left off
- value = npy_datetimestruct_to_datetime(NPY_FR_ns, &dts)
- if out_local == 1:
- # Store the out_tzoffset in seconds
- # since we store the total_seconds of
- # dateutil.tz.tzoffset objects
- tz = timezone(timedelta(minutes=out_tzoffset))
- result_timezone[i] = tz
- out_local = 0
- out_tzoffset = 0
- iresult[i] = value
- check_dts_bounds(&dts)
- continue
+ if string_to_dts_succeeded:
+ # No error reported by string_to_dts, pick back up
+ # where we left off
+ value = npy_datetimestruct_to_datetime(NPY_FR_ns, &dts)
+ if out_local == 1:
+ # Store the out_tzoffset in seconds
+ # since we store the total_seconds of
+ # dateutil.tz.tzoffset objects
+ tz = timezone(timedelta(minutes=out_tzoffset))
+ result_timezone[i] = tz
+ out_local = 0
+ out_tzoffset = 0
+ iresult[i] = value
+ check_dts_bounds(&dts)
+ continue
if parse_today_now(val, &iresult[i], utc):
continue
# Some ISO formats can't be parsed by string_to_dts
- # For example, 6-digit YYYYMD. So, if there's an error,
- # try the string-matching code below.
+ # For example, 6-digit YYYYMD. So, if there's an error, and a format
+ # was specified, then try the string-matching code below. If the format
+ # specified was 'ISO8601', then we need to error, because
+ # only string_to_dts handles mixed ISO8601 formats.
+ if not string_to_dts_succeeded and fmt == "ISO8601":
+ raise ValueError(f"Time data {val} is not ISO8601 format")
# exact matching
if exact:
found = format_regex.match(val)
if not found:
- raise ValueError(f"time data \"{val}\" doesn't "
- f"match format \"{fmt}\"")
+ raise ValueError(
+ f"time data \"{val}\" doesn't match format \"{fmt}\""
+ )
if len(val) != found.end():
raise ValueError(
- f"unconverted data remains: "
- f'"{val[found.end():]}"'
+ "unconverted data remains when parsing with "
+ f"format \"{fmt}\": \"{val[found.end():]}\""
)
# search
@@ -351,8 +362,7 @@ def array_strptime(
found = format_regex.search(val)
if not found:
raise ValueError(
- f"time data \"{val}\" doesn't match "
- f"format \"{fmt}\""
+ f"time data \"{val}\" doesn't match format \"{fmt}\""
)
iso_year = -1
@@ -504,7 +514,15 @@ def array_strptime(
result_timezone[i] = tz
except (ValueError, OutOfBoundsDatetime) as ex:
- ex.args = (f"{str(ex)}, at position {i}",)
+ ex.args = (
+ f"{str(ex)}, at position {i}. You might want to try:\n"
+ " - passing `format` if your strings have a consistent format;\n"
+ " - passing `format='ISO8601'` if your strings are "
+ "all ISO8601 but not necessarily in exactly the same format;\n"
+ " - passing `format='mixed'`, and the format will be "
+ "inferred for each element individually. "
+ "You might want to use `dayfirst` alongside this.",
+ )
if is_coerce:
iresult[i] = NPY_NAT
continue
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 3006bc6290ff7..b917f2de61343 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -445,7 +445,8 @@ def _convert_listlike_datetimes(
if format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
- if format is not None:
+ # `format` could be inferred, or user didn't ask for mixed-format parsing.
+ if format is not None and format != "mixed":
return _array_strptime_with_fallback(arg, name, utc, format, exact, errors)
result, tz_parsed = objects_to_datetime64ns(
@@ -687,7 +688,7 @@ def to_datetime(
yearfirst: bool = False,
utc: bool = False,
format: str | None = None,
- exact: bool = True,
+ exact: bool | lib.NoDefault = lib.no_default,
unit: str | None = None,
infer_datetime_format: lib.NoDefault | bool = lib.no_default,
origin: str = "unix",
@@ -717,9 +718,7 @@ def to_datetime(
.. warning::
``dayfirst=True`` is not strict, but will prefer to parse
- with day first. If a delimited date string cannot be parsed in
- accordance with the given `dayfirst` option, e.g.
- ``to_datetime(['31-12-2021'])``, then a warning will be shown.
+ with day first.
yearfirst : bool, default False
Specify a date parse order if `arg` is str or is list-like.
@@ -759,6 +758,12 @@ def to_datetime(
<https://docs.python.org/3/library/datetime.html
#strftime-and-strptime-behavior>`_ for more information on choices, though
note that :const:`"%f"` will parse all the way up to nanoseconds.
+ You can also pass:
+
+ - "ISO8601", to parse any `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_
+ time string (not necessarily in exactly the same format);
+ - "mixed", to infer the format for each element individually. This is risky,
+ and you should probably use it along with `dayfirst`.
exact : bool, default True
Control how `format` is used:
@@ -766,6 +771,7 @@ def to_datetime(
- If :const:`False`, allow the `format` to match anywhere in the target
string.
+ Cannot be used alongside ``format='ISO8601'`` or ``format='mixed'``.
unit : str, default 'ns'
The unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
@@ -997,6 +1003,8 @@ def to_datetime(
DatetimeIndex(['2018-10-26 12:00:00+00:00', '2020-01-01 18:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
"""
+ if exact is not lib.no_default and format in {"mixed", "ISO8601"}:
+ raise ValueError("Cannot use 'exact' when 'format' is 'mixed' or 'ISO8601'")
if infer_datetime_format is not lib.no_default:
warnings.warn(
"The argument 'infer_datetime_format' is deprecated and will "
diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py
index 09a2967d62fee..edae696b84bf4 100644
--- a/pandas/tests/io/parser/test_parse_dates.py
+++ b/pandas/tests/io/parser/test_parse_dates.py
@@ -1721,7 +1721,8 @@ def test_parse_multiple_delimited_dates_with_swap_warnings():
with pytest.raises(
ValueError,
match=(
- r'^time data "31/05/2000" doesn\'t match format "%m/%d/%Y", at position 1$'
+ r'^time data "31/05/2000" doesn\'t match format "%m/%d/%Y", '
+ r"at position 1. You might want to try:"
),
):
pd.to_datetime(["01/01/2000", "31/05/2000", "31/05/2001", "01/02/2000"])
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index afe237d35076c..71f2cae49fe41 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -48,6 +48,16 @@
from pandas.core.tools.datetimes import start_caching_at
from pandas.util.version import Version
+PARSING_ERR_MSG = (
+ r"You might want to try:\n"
+ r" - passing `format` if your strings have a consistent format;\n"
+ r" - passing `format=\'ISO8601\'` if your strings are all ISO8601 "
+ r"but not necessarily in exactly the same format;\n"
+ r" - passing `format=\'mixed\'`, and the format will be inferred "
+ r"for each element individually. You might want to use `dayfirst` "
+ r"alongside this."
+)
+
@pytest.fixture(params=[True, False])
def cache(request):
@@ -133,7 +143,11 @@ def test_to_datetime_format_YYYYMMDD_with_nat(self, cache):
ser2 = ser.apply(str)
ser2[2] = "nat"
with pytest.raises(
- ValueError, match='unconverted data remains: ".0", at position 0'
+ ValueError,
+ match=(
+ 'unconverted data remains when parsing with format "%Y%m%d": ".0", '
+ "at position 0"
+ ),
):
# https://github.com/pandas-dev/pandas/issues/50051
to_datetime(ser2, format="%Y%m%d", cache=cache)
@@ -527,8 +541,10 @@ def test_to_datetime_parse_timezone_malformed(self, offset):
msg = "|".join(
[
- r'^time data ".*" doesn\'t match format ".*", at position 0$',
- r'^unconverted data remains: ".*", at position 0$',
+ r'^time data ".*" doesn\'t match format ".*", at position 0. '
+ f"{PARSING_ERR_MSG}$",
+ r'^unconverted data remains when parsing with format ".*": ".*", '
+ f"at position 0. {PARSING_ERR_MSG}$",
]
)
with pytest.raises(ValueError, match=msg):
@@ -1294,7 +1310,10 @@ def test_datetime_bool_arrays_mixed(self, cache):
to_datetime([False, datetime.today()], cache=cache)
with pytest.raises(
ValueError,
- match=r'^time data "True" doesn\'t match format "%Y%m%d", at position 1$',
+ match=(
+ r'^time data "True" doesn\'t match format "%Y%m%d", '
+ f"at position 1. {PARSING_ERR_MSG}$"
+ ),
):
to_datetime(["20130101", True], cache=cache)
tm.assert_index_equal(
@@ -1335,9 +1354,11 @@ def test_datetime_invalid_scalar(self, value, format, warning):
msg = "|".join(
[
- r'^time data "a" doesn\'t match format "%H:%M:%S", at position 0$',
+ r'^time data "a" doesn\'t match format "%H:%M:%S", at position 0. '
+ f"{PARSING_ERR_MSG}$",
r'^Given date string "a" not likely a datetime, at position 0$',
- r'^unconverted data remains: "9", at position 0$',
+ r'^unconverted data remains when parsing with format "%H:%M:%S": "9", '
+ f"at position 0. {PARSING_ERR_MSG}$",
r"^second must be in 0..59: 00:01:99, at position 0$",
]
)
@@ -1360,7 +1381,7 @@ def test_datetime_outofbounds_scalar(self, value, format, warning):
assert res is NaT
if format is not None:
- msg = r'^time data ".*" doesn\'t match format ".*", at position 0$'
+ msg = r'^time data ".*" doesn\'t match format ".*", at position 0.'
with pytest.raises(ValueError, match=msg):
to_datetime(value, errors="raise", format=format)
else:
@@ -1387,8 +1408,10 @@ def test_datetime_invalid_index(self, values, format, warning):
msg = "|".join(
[
r'^Given date string "a" not likely a datetime, at position 0$',
- r'^time data "a" doesn\'t match format "%H:%M:%S", at position 0$',
- r'^unconverted data remains: "9", at position 0$',
+ r'^time data "a" doesn\'t match format "%H:%M:%S", at position 0. '
+ f"{PARSING_ERR_MSG}$",
+ r'^unconverted data remains when parsing with format "%H:%M:%S": "9", '
+ f"at position 0. {PARSING_ERR_MSG}$",
r"^second must be in 0..59: 00:01:99, at position 0$",
]
)
@@ -2092,7 +2115,7 @@ def test_dataframe_coerce(self, cache):
msg = (
r'^cannot assemble the datetimes: time data ".+" doesn\'t '
- r'match format "%Y%m%d", at position 1$'
+ r'match format "%Y%m%d", at position 1\.'
)
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
@@ -2169,8 +2192,8 @@ def test_dataframe_float(self, cache):
# float
df = DataFrame({"year": [2000, 2001], "month": [1.5, 1], "day": [1, 1]})
msg = (
- r"^cannot assemble the datetimes: unconverted data remains: "
- r'"1", at position 0$'
+ r"^cannot assemble the datetimes: unconverted data remains when parsing "
+ r'with format ".*": "1", at position 0.'
)
with pytest.raises(ValueError, match=msg):
to_datetime(df, cache=cache)
@@ -2252,8 +2275,10 @@ def test_to_datetime_iso8601_exact_fails(self, input, format):
# `format` is shorter than the date string, so only fails with `exact=True`
msg = "|".join(
[
- '^unconverted data remains: ".*", at position 0$',
- 'time data ".*" doesn\'t match format ".*", at position 0',
+ '^unconverted data remains when parsing with format ".*": ".*"'
+ f", at position 0. {PARSING_ERR_MSG}$",
+ f'^time data ".*" doesn\'t match format ".*", at position 0. '
+ f"{PARSING_ERR_MSG}$",
]
)
with pytest.raises(
@@ -2386,7 +2411,10 @@ def test_to_datetime_on_datetime64_series(self, cache):
def test_to_datetime_with_space_in_series(self, cache):
# GH 6428
ser = Series(["10/18/2006", "10/18/2008", " "])
- msg = r'^time data " " doesn\'t match format "%m/%d/%Y", at position 2$'
+ msg = (
+ r'^time data " " doesn\'t match format "%m/%d/%Y", '
+ rf"at position 2. {PARSING_ERR_MSG}$"
+ )
with pytest.raises(ValueError, match=msg):
to_datetime(ser, errors="raise", cache=cache)
result_coerce = to_datetime(ser, errors="coerce", cache=cache)
@@ -2661,7 +2689,7 @@ def test_dayfirst_warnings_invalid_input(self):
ValueError,
match=(
r'^time data "03/30/2011" doesn\'t match format '
- r'"%d/%m/%Y", at position 1$'
+ rf'"%d/%m/%Y", at position 1. {PARSING_ERR_MSG}$'
),
):
to_datetime(arr, dayfirst=True)
@@ -2732,7 +2760,7 @@ def test_to_datetime_inconsistent_format(self, cache):
ser = Series(np.array(data))
msg = (
r'^time data "01-02-2011 00:00:00" doesn\'t match format '
- r'"%m/%d/%Y %H:%M:%S", at position 1$'
+ rf'"%m/%d/%Y %H:%M:%S", at position 1. {PARSING_ERR_MSG}$'
)
with pytest.raises(ValueError, match=msg):
to_datetime(ser, cache=cache)
@@ -2861,33 +2889,34 @@ def test_day_not_in_month_raise(self, cache):
(
"2015-02-29",
"%Y-%m-%d",
- "^day is out of range for month, at position 0$",
+ f"^day is out of range for month, at position 0. {PARSING_ERR_MSG}$",
),
(
"2015-29-02",
"%Y-%d-%m",
- "^day is out of range for month, at position 0$",
+ f"^day is out of range for month, at position 0. {PARSING_ERR_MSG}$",
),
(
"2015-02-32",
"%Y-%m-%d",
- '^unconverted data remains: "2", at position 0$',
+ '^unconverted data remains when parsing with format "%Y-%m-%d": "2", '
+ f"at position 0. {PARSING_ERR_MSG}$",
),
(
"2015-32-02",
"%Y-%d-%m",
'^time data "2015-32-02" doesn\'t match format "%Y-%d-%m", '
- "at position 0$",
+ f"at position 0. {PARSING_ERR_MSG}$",
),
(
"2015-04-31",
"%Y-%m-%d",
- "^day is out of range for month, at position 0$",
+ f"^day is out of range for month, at position 0. {PARSING_ERR_MSG}$",
),
(
"2015-31-04",
"%Y-%d-%m",
- "^day is out of range for month, at position 0$",
+ f"^day is out of range for month, at position 0. {PARSING_ERR_MSG}$",
),
],
)
@@ -3304,9 +3333,7 @@ def test_incorrect_value_exception(self):
)
def test_to_datetime_out_of_bounds_with_format_arg(self, format, warning):
# see gh-23830
- msg = (
- r"^Out of bounds nanosecond timestamp: 2417-10-10 00:00:00, at position 0$"
- )
+ msg = r"^Out of bounds nanosecond timestamp: 2417-10-10 00:00:00, at position 0"
with pytest.raises(OutOfBoundsDatetime, match=msg):
with tm.assert_produces_warning(warning, match="Could not infer format"):
to_datetime("2417-10-10 00:00:00", format=format)
@@ -3536,3 +3563,46 @@ def test_to_datetime_format_f_parse_nanos():
nanosecond=789,
)
assert result == expected
+
+
+def test_to_datetime_mixed_iso8601():
+ # https://github.com/pandas-dev/pandas/issues/50411
+ result = to_datetime(["2020-01-01", "2020-01-01 05:00:00"], format="ISO8601")
+ expected = DatetimeIndex(["2020-01-01 00:00:00", "2020-01-01 05:00:00"])
+ tm.assert_index_equal(result, expected)
+
+
+def test_to_datetime_mixed_other():
+ # https://github.com/pandas-dev/pandas/issues/50411
+ result = to_datetime(["01/11/2000", "12 January 2000"], format="mixed")
+ expected = DatetimeIndex(["2000-01-11", "2000-01-12"])
+ tm.assert_index_equal(result, expected)
+
+
+@pytest.mark.parametrize("exact", [True, False])
+@pytest.mark.parametrize("format", ["ISO8601", "mixed"])
+def test_to_datetime_mixed_or_iso_exact(exact, format):
+ msg = "Cannot use 'exact' when 'format' is 'mixed' or 'ISO8601'"
+ with pytest.raises(ValueError, match=msg):
+ to_datetime(["2020-01-01"], exact=exact, format=format)
+
+
+def test_to_datetime_mixed_not_necessarily_iso8601_raise():
+ # https://github.com/pandas-dev/pandas/issues/50411
+ with pytest.raises(
+ ValueError, match="Time data 01-01-2000 is not ISO8601 format, at position 1"
+ ):
+ to_datetime(["2020-01-01", "01-01-2000"], format="ISO8601")
+
+
+@pytest.mark.parametrize(
+ ("errors", "expected"),
+ [
+ ("coerce", DatetimeIndex(["2020-01-01 00:00:00", NaT])),
+ ("ignore", Index(["2020-01-01", "01-01-2000"])),
+ ],
+)
+def test_to_datetime_mixed_not_necessarily_iso8601_coerce(errors, expected):
+ # https://github.com/pandas-dev/pandas/issues/50411
+ result = to_datetime(["2020-01-01", "01-01-2000"], format="ISO8601", errors=errors)
+ tm.assert_index_equal(result, expected)
diff --git a/web/pandas/pdeps/0004-consistent-to-datetime-parsing.md b/web/pandas/pdeps/0004-consistent-to-datetime-parsing.md
index 7635fabe2dbc6..3a020aa736a5e 100644
--- a/web/pandas/pdeps/0004-consistent-to-datetime-parsing.md
+++ b/web/pandas/pdeps/0004-consistent-to-datetime-parsing.md
@@ -4,7 +4,7 @@
- Status: Accepted
- Discussion: [#48621](https://github.com/pandas-dev/pandas/pull/48621)
- Author: [Marco Gorelli](https://github.com/MarcoGorelli)
-- Revision: 1
+- Revision: 2
## Abstract
@@ -58,11 +58,13 @@ Concretely, the suggestion is:
If a user has dates in a mixed format, they can still use flexible parsing and accept
the risks that poses, e.g.:
```ipython
-In [3]: pd.Series(['12-01-2000 00:00:00', '13-01-2000 00:00:00']).apply(pd.to_datetime)
-Out[3]:
-0 2000-12-01
-1 2000-01-13
-dtype: datetime64[ns]
+In [3]: pd.to_datetime(['12-01-2000 00:00:00', '13-01-2000 00:00:00'], format='mixed')
+Out[3]: DatetimeIndex(['2000-12-01', '2000-01-13'], dtype='datetime64[ns]', freq=None)
+```
+or, if their dates are all ISO8601,
+```ipython
+In [4]: pd.to_datetime(['2020-01-01', '2020-01-01 03:00'], format='ISO8601')
+Out[4]: DatetimeIndex(['2020-01-01 00:00:00', '2020-01-01 03:00:00'], dtype='datetime64[ns]', freq=None)
```
## Usage and Impact
@@ -99,3 +101,4 @@ We could make ``guess_datetime_format`` smarter by using a random sample of elem
### PDEP History
- 18 September 2022: Initial draft
+- 25 January 2023: Amended to mention ``format='ISO8601'`` and ``format='mixed'`` options
| - [ ] closes #50411 (Replace xxxx with the GitHub issue number)
- [ ] closes #50972
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50939 | 2023-01-23T11:30:23Z | 2023-02-14T10:44:26Z | 2023-02-14T10:44:26Z | 2023-02-15T00:05:45Z |
Resolved duplicate issue by updating obj object | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ce123c704ba33..cc6e7406eff49 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4545,9 +4545,10 @@ def drop(
if inplace:
self._update_inplace(obj)
- return None
- else:
- return obj
+ elif obj is not self:
+ self._update_inplace(obj)
+
+ return obj
@final
def _drop_axis(
| - [x] closes #50845
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50938 | 2023-01-23T09:44:10Z | 2023-03-05T12:49:22Z | null | 2023-03-05T12:49:23Z |
DOC: Fix dark mode styles | diff --git a/doc/source/_static/css/getting_started.css b/doc/source/_static/css/getting_started.css
index 2a348e5b84e6e..2eb69beb05cb5 100644
--- a/doc/source/_static/css/getting_started.css
+++ b/doc/source/_static/css/getting_started.css
@@ -236,11 +236,11 @@ ul.task-bullet > li > p:first-child {
.tutorial-card .card-header {
cursor: pointer;
- background-color: white;
+ background-color: transparent;
}
.tutorial-card .card-body {
- background-color: #F0F0F0;
+ background-color: transparent;
}
.tutorial-card .badge {
| closes #48527 | https://api.github.com/repos/pandas-dev/pandas/pulls/50936 | 2023-01-23T05:28:22Z | 2023-03-09T16:31:48Z | 2023-03-09T16:31:48Z | 2023-03-09T16:32:04Z |
CI: Adjust test to fix wheel builders | diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index 12e8a89cd0382..665aeb6bc7f87 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -86,7 +86,8 @@ jobs:
activate-environment: test
channels: conda-forge, anaconda
channel-priority: true
- mamba-version: "*"
+ # mamba fails to solve, also we really don't need this since we're just installing python
+ # mamba-version: "*"
- name: Test wheels (Windows 64-bit only)
if: ${{ matrix.buildplat[1] == 'win_amd64' }}
@@ -154,7 +155,8 @@ jobs:
python-version: '3.8'
channels: conda-forge
channel-priority: true
- mamba-version: "*"
+ # mamba fails to solve, also we really don't need this since we're just installing python
+ # mamba-version: "*"
- name: Build sdist
run: |
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 0a9ed30dd55d6..69b9cbb7d6a26 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -1802,10 +1802,12 @@ def test_encoding_latin1_118(self, datapath):
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
+ # Move path outside of read_stata, or else assert_produces_warning
+ # will block pytests skip mechanism from triggering (failing the test)
+ # if the path is not present
+ path = datapath("io", "data", "stata", "stata1_encoding_118.dta")
with tm.assert_produces_warning(UnicodeWarning) as w:
- encoded = read_stata(
- datapath("io", "data", "stata", "stata1_encoding_118.dta")
- )
+ encoded = read_stata(path)
assert len(w) == 151
assert w[0].message.args[0] == msg
| - [ ] closes #50898 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50934 | 2023-01-22T15:32:45Z | 2023-01-26T19:15:14Z | 2023-01-26T19:15:14Z | 2023-01-26T23:03:46Z |
DEPR: move NumericIndex._convert_tolerance to Index | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2948bb81d0b6a..04a0afedc1a75 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3780,6 +3780,17 @@ def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarra
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError("list-like tolerance size must match target index size")
+ elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number):
+ if tolerance.ndim > 0:
+ raise ValueError(
+ f"tolerance argument for {type(self).__name__} with dtype "
+ f"{self.dtype} must contain numeric elements if it is list type"
+ )
+
+ raise ValueError(
+ f"tolerance argument for {type(self).__name__} with dtype {self.dtype} "
+ f"must be numeric if it is a scalar: {repr(tolerance)}"
+ )
return tolerance
@final
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 68bdd1893c77f..cd621b64c886e 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -221,22 +221,6 @@ def _maybe_cast_slice_bound(self, label, side: str):
# ----------------------------------------------------------------
- def _convert_tolerance(self, tolerance, target):
- tolerance = super()._convert_tolerance(tolerance, target)
-
- if not np.issubdtype(tolerance.dtype, np.number):
- if tolerance.ndim > 0:
- raise ValueError(
- f"tolerance argument for {type(self).__name__} must contain "
- "numeric elements if it is list type"
- )
-
- raise ValueError(
- f"tolerance argument for {type(self).__name__} must be numeric "
- f"if it is a scalar: {repr(tolerance)}"
- )
- return tolerance
-
@classmethod
def _assert_safe_casting(cls, data: np.ndarray, subarr: np.ndarray) -> None:
"""
| Moves`_convert_tolerance` from `NumericIndex`to `Index` in preparation to remove `NumericIndex` and include numpy int/uint/float64 in the base `Index`.
xref #42717. | https://api.github.com/repos/pandas-dev/pandas/pulls/50932 | 2023-01-22T09:18:19Z | 2023-01-25T20:51:45Z | 2023-01-25T20:51:44Z | 2023-01-25T21:12:28Z |
DEPR: move NumericIndex._format_native_types to Index | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index fc2c51166a737..b05b7cae8042e 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1291,11 +1291,30 @@ def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]
return header + result
def _format_native_types(
- self, *, na_rep: str_t = "", quoting=None, **kwargs
+ self,
+ *,
+ na_rep: str_t = "",
+ decimal: str_t = ".",
+ float_format=None,
+ date_format=None,
+ quoting=None,
) -> npt.NDArray[np.object_]:
"""
Actually format specific types of the index.
"""
+ from pandas.io.formats.format import FloatArrayFormatter
+
+ if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype):
+ formatter = FloatArrayFormatter(
+ self._values,
+ na_rep=na_rep,
+ float_format=float_format,
+ decimal=decimal,
+ quoting=quoting,
+ fixed_width=False,
+ )
+ return formatter.get_result_as_array()
+
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index fab741ec1ba18..68bdd1893c77f 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -5,10 +5,7 @@
import numpy as np
from pandas._libs import index as libindex
-from pandas._typing import (
- Dtype,
- npt,
-)
+from pandas._typing import Dtype
from pandas.util._decorators import (
cache_readonly,
doc,
@@ -251,33 +248,3 @@ def _assert_safe_casting(cls, data: np.ndarray, subarr: np.ndarray) -> None:
if is_integer_dtype(subarr.dtype):
if not np.array_equal(data, subarr):
raise TypeError("Unsafe NumPy casting, you must explicitly cast")
-
- def _format_native_types(
- self,
- *,
- na_rep: str = "",
- float_format=None,
- decimal: str = ".",
- quoting=None,
- **kwargs,
- ) -> npt.NDArray[np.object_]:
- from pandas.io.formats.format import FloatArrayFormatter
-
- if is_float_dtype(self.dtype):
- formatter = FloatArrayFormatter(
- self._values,
- na_rep=na_rep,
- float_format=float_format,
- decimal=decimal,
- quoting=quoting,
- fixed_width=False,
- )
- return formatter.get_result_as_array()
-
- return super()._format_native_types(
- na_rep=na_rep,
- float_format=float_format,
- decimal=decimal,
- quoting=quoting,
- **kwargs,
- )
| This starts moving functionality from `NumericIndex`to `Index` in preparation to remove `NumericIndex` and include numpy int/uint/float64 in the base `Index`.
Also removes the `**kwargs`, as that wasn't used in the method.
I'm doing one method per PR, figuring that is easier to review. I could add ore per PR if you want.
xref #42717. | https://api.github.com/repos/pandas-dev/pandas/pulls/50931 | 2023-01-22T09:10:57Z | 2023-01-23T15:43:11Z | 2023-01-23T15:43:11Z | 2023-01-23T20:19:45Z |
BUG/PERF: Series(index=MultiIndex).rename losing EA dtypes | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 0ceda331de790..9f68a9ebb43b8 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -898,6 +898,7 @@ Performance improvements
- Performance improvements to :func:`read_sas` (:issue:`47403`, :issue:`47405`, :issue:`47656`, :issue:`48502`)
- Memory improvement in :meth:`RangeIndex.sort_values` (:issue:`48801`)
- Performance improvement in :meth:`Series.to_numpy` if ``copy=True`` by avoiding copying twice (:issue:`24345`)
+- Performance improvement in :meth:`Series.rename` with :class:`MultiIndex` (:issue:`21055`)
- Performance improvement in :class:`DataFrameGroupBy` and :class:`SeriesGroupBy` when ``by`` is a categorical type and ``sort=False`` (:issue:`48976`)
- Performance improvement in :class:`DataFrameGroupBy` and :class:`SeriesGroupBy` when ``by`` is a categorical type and ``observed=False`` (:issue:`49596`)
- Performance improvement in :func:`read_stata` with parameter ``index_col`` set to ``None`` (the default). Now the index will be a :class:`RangeIndex` instead of :class:`Int64Index` (:issue:`49745`)
@@ -1018,6 +1019,7 @@ Indexing
- Bug in :meth:`DataFrame.iloc` raising ``IndexError`` when indexer is a :class:`Series` with numeric extension array dtype (:issue:`49521`)
- Bug in :func:`~DataFrame.describe` when formatting percentiles in the resulting index showed more decimals than needed (:issue:`46362`)
- Bug in :meth:`DataFrame.compare` does not recognize differences when comparing ``NA`` with value in nullable dtypes (:issue:`48939`)
+- Bug in :meth:`Series.rename` with :class:`MultiIndex` losing extension array dtypes (:issue:`21055`)
- Bug in :meth:`DataFrame.isetitem` coercing extension array dtypes in :class:`DataFrame` to object (:issue:`49922`)
- Bug in :class:`BusinessHour` would cause creation of :class:`DatetimeIndex` to fail when no opening hour was included in the index (:issue:`49835`)
-
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 4d2e4758817be..55edd3f9f9069 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -6026,15 +6026,13 @@ def _transform_index(self, func, *, level=None) -> Index:
Only apply function to one level of the MultiIndex if level is specified.
"""
if isinstance(self, ABCMultiIndex):
- if level is not None:
- # Caller is responsible for ensuring level is positional.
- items = [
- tuple(func(y) if i == level else y for i, y in enumerate(x))
- for x in self
- ]
- else:
- items = [tuple(func(y) for y in x) for x in self]
- return type(self).from_tuples(items, names=self.names)
+ values = [
+ self.get_level_values(i).map(func)
+ if i == level or level is None
+ else self.get_level_values(i)
+ for i in range(self.nlevels)
+ ]
+ return type(self).from_arrays(values)
else:
items = [func(x) for x in self]
return Index(items, name=self.name, tupleize_cols=False)
diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py
index d0392929cb082..93c4fbb7f3c46 100644
--- a/pandas/tests/series/methods/test_rename.py
+++ b/pandas/tests/series/methods/test_rename.py
@@ -136,6 +136,25 @@ def test_rename_series_with_multiindex(self):
tm.assert_series_equal(result, series_expected)
+ def test_rename_series_with_multiindex_keeps_ea_dtypes(self):
+ # GH21055
+ arrays = [
+ Index([1, 2, 3], dtype="Int64").astype("category"),
+ Index([1, 2, 3], dtype="Int64"),
+ ]
+ mi = MultiIndex.from_arrays(arrays, names=["A", "B"])
+ ser = Series(1, index=mi)
+ result = ser.rename({1: 4}, level=1)
+
+ arrays_expected = [
+ Index([1, 2, 3], dtype="Int64").astype("category"),
+ Index([4, 2, 3], dtype="Int64"),
+ ]
+ mi_expected = MultiIndex.from_arrays(arrays_expected, names=["A", "B"])
+ expected = Series(1, index=mi_expected)
+
+ tm.assert_series_equal(result, expected)
+
def test_rename_error_arg(self):
# GH 46889
ser = Series(["foo", "bar"])
| - [x] closes #21055
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.0.0.rst` file if fixing a bug or adding a new feature.
`Series(index=MultiIndex).rename` loses EA dtypes:
```
import pandas as pd
import numpy as np
lev0 = pd.Index(np.arange(1000), dtype="Int64").astype("category")
lev1 = pd.Index(np.arange(1000), dtype="Int64")
mi = pd.MultiIndex.from_product([lev0, lev1], names=["A", "B"])
ser1 = pd.Series(1, index=mi)
ser2 = ser1.rename({10: 11}, level=1)
print(ser2.index.dtypes)
```
main:
```
A int64
B int64
dtype: object
```
PR:
```
A category
B Int64
dtype: object
```
Perf improves as well:
```
%timeit ser1.rename({10: 11}, level=1)
1.25 s ± 52.8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) -> main
350 ms ± 8.29 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) -> PR
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/50930 | 2023-01-22T02:04:09Z | 2023-01-23T18:29:01Z | 2023-01-23T18:29:01Z | 2023-02-23T01:38:38Z |
ENH: support min/max/sum for pyarrow duration dtypes | diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index ddb2f01898ec7..7edd2fa585156 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -1032,6 +1032,9 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
not_eq = pc.not_equal(data_to_cmp, 0)
data_to_reduce = not_eq
+ elif name in ["min", "max", "sum"] and pa.types.is_duration(pa_type):
+ data_to_reduce = self._data.cast(pa.int64())
+
if name == "sem":
def pyarrow_meth(data, skip_nulls, **kwargs):
@@ -1066,6 +1069,9 @@ def pyarrow_meth(data, skip_nulls, **kwargs):
raise TypeError(msg) from err
if pc.is_null(result).as_py():
return self.dtype.na_value
+
+ if name in ["min", "max", "sum"] and pa.types.is_duration(pa_type):
+ result = result.cast(pa_type)
return result.as_py()
def __setitem__(self, key, value) -> None:
diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py
index 3a9dbe9dfb384..200a494997116 100644
--- a/pandas/tests/extension/base/groupby.py
+++ b/pandas/tests/extension/base/groupby.py
@@ -137,6 +137,7 @@ def test_in_numeric_groupby(self, data_for_grouping):
or is_string_dtype(dtype)
or is_period_dtype(dtype)
or is_object_dtype(dtype)
+ or dtype.kind == "m" # in particular duration[*][pyarrow]
):
expected = pd.Index(["B", "C"])
result = df.groupby("A").sum().columns
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index af3952a532113..71fc7e17bf808 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -539,10 +539,6 @@ def test_reduce_series(self, data, all_numeric_reductions, skipna, request):
"sem",
] and pa.types.is_temporal(pa_dtype):
request.node.add_marker(xfail_mark)
- elif all_numeric_reductions in ["sum", "min", "max"] and pa.types.is_duration(
- pa_dtype
- ):
- request.node.add_marker(xfail_mark)
elif pa.types.is_boolean(pa_dtype) and all_numeric_reductions in {
"sem",
"std",
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50928 | 2023-01-21T23:39:56Z | 2023-01-26T19:19:06Z | 2023-01-26T19:19:06Z | 2023-01-26T19:20:41Z |
ENH: support cumsum with pyarrow durations | diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index e2a74ea6f5351..aafd4cbeeaedf 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -985,7 +985,18 @@ def _accumulate(
pyarrow_meth = getattr(pc, pyarrow_name, None)
if pyarrow_meth is None:
return super()._accumulate(name, skipna=skipna, **kwargs)
- result = pyarrow_meth(self._data, skip_nulls=skipna, **kwargs)
+
+ data_to_accum = self._data
+
+ pa_dtype = data_to_accum.type
+ if pa.types.is_duration(pa_dtype):
+ data_to_accum = data_to_accum.cast(pa.int64())
+
+ result = pyarrow_meth(data_to_accum, skip_nulls=skipna, **kwargs)
+
+ if pa.types.is_duration(pa_dtype):
+ result = result.cast(pa_dtype)
+
return type(self)(result)
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index a7c243cdfe74f..b453503467662 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -372,16 +372,27 @@ def test_getitem_scalar(self, data):
class TestBaseAccumulateTests(base.BaseAccumulateTests):
- def check_accumulate(self, s, op_name, skipna):
- result = getattr(s, op_name)(skipna=skipna).astype("Float64")
- expected = getattr(s.astype("Float64"), op_name)(skipna=skipna)
+ def check_accumulate(self, ser, op_name, skipna):
+ result = getattr(ser, op_name)(skipna=skipna)
+
+ if ser.dtype.kind == "m":
+ # Just check that we match the integer behavior.
+ ser = ser.astype("int64[pyarrow]")
+ result = result.astype("int64[pyarrow]")
+
+ result = result.astype("Float64")
+ expected = getattr(ser.astype("Float64"), op_name)(skipna=skipna)
self.assert_series_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("skipna", [True, False])
def test_accumulate_series_raises(self, data, all_numeric_accumulations, skipna):
pa_type = data.dtype.pyarrow_dtype
if (
- (pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type))
+ (
+ pa.types.is_integer(pa_type)
+ or pa.types.is_floating(pa_type)
+ or pa.types.is_duration(pa_type)
+ )
and all_numeric_accumulations == "cumsum"
and not pa_version_under9p0
):
@@ -423,9 +434,7 @@ def test_accumulate_series(self, data, all_numeric_accumulations, skipna, reques
raises=NotImplementedError,
)
)
- elif all_numeric_accumulations == "cumsum" and (
- pa.types.is_duration(pa_type) or pa.types.is_boolean(pa_type)
- ):
+ elif all_numeric_accumulations == "cumsum" and (pa.types.is_boolean(pa_type)):
request.node.add_marker(
pytest.mark.xfail(
reason=f"{all_numeric_accumulations} not implemented for {pa_type}",
| fixes 8 xfails | https://api.github.com/repos/pandas-dev/pandas/pulls/50927 | 2023-01-21T20:34:23Z | 2023-01-24T18:54:17Z | 2023-01-24T18:54:17Z | 2023-01-24T19:02:21Z |
API: Harmonize dtype for index levels for Series.sparse.from_coo | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 0ceda331de790..0ea8a5db4afb3 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -600,6 +600,7 @@ Other API changes
methods to get a full slice (for example ``df.loc[:]`` or ``df[:]``) (:issue:`49469`)
- Disallow computing ``cumprod`` for :class:`Timedelta` object; previously this returned incorrect values (:issue:`50246`)
- Loading a JSON file with duplicate columns using ``read_json(orient='split')`` renames columns to avoid duplicates, as :func:`read_csv` and the other readers do (:issue:`50370`)
+- The levels of the index of the :class:`Series` returned from ``Series.sparse.from_coo`` now always have dtype ``int32``. Previously they had dtype ``int64`` (:issue:`50926`)
- :func:`to_datetime` with ``unit`` of either "Y" or "M" will now raise if a sequence contains a non-round ``float`` value, matching the ``Timestamp`` behavior (:issue:`50301`)
-
diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py
index 3ef5ba5835d79..8e3ba57cbaba4 100644
--- a/pandas/core/arrays/sparse/scipy_sparse.py
+++ b/pandas/core/arrays/sparse/scipy_sparse.py
@@ -203,9 +203,6 @@ def coo_to_sparse_series(
ser = ser.sort_index()
ser = ser.astype(SparseDtype(ser.dtype))
if dense_index:
- # is there a better constructor method to use here?
- i = range(A.shape[0])
- j = range(A.shape[1])
- ind = MultiIndex.from_product([i, j])
+ ind = MultiIndex.from_product([A.row, A.col])
ser = ser.reindex(ind)
return ser
diff --git a/pandas/tests/arrays/sparse/test_accessor.py b/pandas/tests/arrays/sparse/test_accessor.py
index 9ac0d9d0401ed..7d6a9e18a26c6 100644
--- a/pandas/tests/arrays/sparse/test_accessor.py
+++ b/pandas/tests/arrays/sparse/test_accessor.py
@@ -218,14 +218,11 @@ def test_series_from_coo(self, dtype, dense_index):
A = scipy.sparse.eye(3, format="coo", dtype=dtype)
result = pd.Series.sparse.from_coo(A, dense_index=dense_index)
- # TODO: GH49560: scipy.sparse.eye always has A.row and A.col dtype as int32.
- # fix index_dtype to follow scipy.sparse convention (always int32)?
- index_dtype = np.int64 if dense_index else np.int32
index = pd.MultiIndex.from_tuples(
[
- np.array([0, 0], dtype=index_dtype),
- np.array([1, 1], dtype=index_dtype),
- np.array([2, 2], dtype=index_dtype),
+ np.array([0, 0], dtype=np.int32),
+ np.array([1, 1], dtype=np.int32),
+ np.array([2, 2], dtype=np.int32),
],
)
expected = pd.Series(SparseArray(np.array([1, 1, 1], dtype=dtype)), index=index)
| Currently the levels of the index the sparse series has different dtype (int32 or int64), depending on if the `dense_index` is true or False. This PR makes the dtype to be always int32, the same as the row/col attributes of the scipy `coo_matrix`.
See also discussion here: https://github.com/pandas-dev/pandas/pull/49560#discussion_r1067619162.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50926 | 2023-01-21T20:17:30Z | 2023-01-23T18:50:41Z | 2023-01-23T18:50:41Z | 2023-01-23T18:53:53Z |
TST: Remove fsspec internals from tests | diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py
index 14a7d77773f29..a5790bb456d44 100644
--- a/pandas/tests/io/test_fsspec.py
+++ b/pandas/tests/io/test_fsspec.py
@@ -50,10 +50,8 @@ def test_read_csv(cleared_fs, df1):
def test_reasonable_error(monkeypatch, cleared_fs):
- from fsspec import registry
from fsspec.registry import known_implementations
- registry.target.clear()
with pytest.raises(ValueError, match="nosuchprotocol"):
read_csv("nosuchprotocol://test/test.csv")
err_msg = "test error message"
diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py
index e3333025da547..a609c1b5fc03d 100644
--- a/pandas/tests/io/test_gcs.py
+++ b/pandas/tests/io/test_gcs.py
@@ -22,17 +22,12 @@
@pytest.fixture
def gcs_buffer(monkeypatch):
"""Emulate GCS using a binary buffer."""
- from fsspec import (
- AbstractFileSystem,
- registry,
- )
-
- registry.target.clear() # remove state
+ import fsspec
gcs_buffer = BytesIO()
gcs_buffer.close = lambda: True
- class MockGCSFileSystem(AbstractFileSystem):
+ class MockGCSFileSystem(fsspec.AbstractFileSystem):
@staticmethod
def open(*args, **kwargs):
gcs_buffer.seek(0)
@@ -42,7 +37,8 @@ def ls(self, path, **kwargs):
# needed for pyarrow
return [{"name": path, "type": "file"}]
- monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
+ # Overwrites the default implementation from gcsfs to our mock class
+ fsspec.register_implementation("gs", MockGCSFileSystem, clobber=True)
return gcs_buffer
@@ -55,9 +51,6 @@ def test_to_read_gcs(gcs_buffer, format):
GH 33987
"""
- from fsspec import registry
-
- registry.target.clear() # remove state
df1 = DataFrame(
{
@@ -132,9 +125,6 @@ def test_to_csv_compression_encoding_gcs(gcs_buffer, compression_only, encoding)
GH 35677 (to_csv, compression), GH 26124 (to_csv, encoding), and
GH 32392 (read_csv, encoding)
"""
- from fsspec import registry
-
- registry.target.clear() # remove state
df = tm.makeDataFrame()
# reference of compressed and encoded file
@@ -174,12 +164,8 @@ def test_to_csv_compression_encoding_gcs(gcs_buffer, compression_only, encoding)
@td.skip_if_no("gcsfs")
def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
- from fsspec import (
- AbstractFileSystem,
- registry,
- )
+ from fsspec import AbstractFileSystem
- registry.target.clear() # remove state
df1 = DataFrame(
{
"int": [1, 3],
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
I don't think clearing the registry does anything. The public registry var is supposed to be immutable, and we were just clearing the internal registry dict.
However, we didn't remove gcs from the known_implementations dict, so gcs would've technically still been in the registry IIUC this code correctly.
https://github.com/fsspec/filesystem_spec/blob/0f3ecd8e629043646ab19b1a2b00d895f0553a81/fsspec/registry.py#L206-L209
| https://api.github.com/repos/pandas-dev/pandas/pulls/50925 | 2023-01-21T16:34:16Z | 2023-01-22T01:02:40Z | 2023-01-22T01:02:40Z | 2023-01-28T02:24:19Z |
Rename .github/workflows/wheels.yml to config.yml | diff --git a/.github/workflows/wheels.yml b/config.yml
similarity index 100%
rename from .github/workflows/wheels.yml
rename to config.yml
| - [x] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50922 | 2023-01-21T13:23:21Z | 2023-01-23T18:32:26Z | null | 2023-01-23T18:32:26Z |
cirlcle.ci | diff --git a/circle.ci b/circle.ci
new file mode 100644
index 0000000000000..8b137891791fe
--- /dev/null
+++ b/circle.ci
@@ -0,0 +1 @@
+
diff --git a/circle.yml b/circle.yml
new file mode 100644
index 0000000000000..14f9774c76ecf
--- /dev/null
+++ b/circle.yml
@@ -0,0 +1,21 @@
+
+[tool.ruff]
+line-length = 88
+update-check = false
+target-version = "py38"
+
+select = [
+ # pyflakes
+ "F",
+ # pycodestyle
+ "E",
+ "W",
+ # flake8-2020
+ "YTT",
+ # flake8-bugbear
+ "B",
+ # flake8-quotes
+ "Q",
+ # pylint
+ "PLE", "PLR", "PLW",
+]
diff --git a/config.yml.txt b/config.yml.txt
new file mode 100644
index 0000000000000..d5aa98a8b520a
--- /dev/null
+++ b/config.yml.txt
@@ -0,0 +1,25 @@
+# Use the latest 2.1 version of CircleCI pipeline process engine.
+# See: https://circleci.com/docs/2.0/configuration-reference
+version: 2.1
+
+# Orbs are reusable packages of CircleCI configuration that you may share across projects, enabling you to create encapsulated, parameterized commands, jobs, and executors that can be used across multiple projects.
+# See: https://circleci.com/docs/2.0/orb-intro/
+orbs:
+ node: circleci/node@4.1
+ # The Node.js orb contains a set of prepackaged CircleCI configuration you can utilize
+ # See the orb's test job here: https://circleci.com/developer/orbs/orb/circleci/node#jobs-test
+
+# Invoke jobs via workflows
+# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
+workflows:
+ sample: # This is the name of the workflow, feel free to change it to better match your workflow.
+ # Inside the workflow, you define the jobs you want to run.
+ jobs:
+ - node/test:
+ matrix:
+ parameters:
+ version: ["15.1", "lts", "12.21"]
+ # For more information about matrix testing see the detailed blog post:
+ # https://circleci.com/blog/circleci-matrix-jobs/
+ # or the configuration reference:
+ # https://circleci.com/docs/2.0/configuration-reference/?section=reference#matrix-requires-version-21
| - [x] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50921 | 2023-01-21T13:03:59Z | 2023-01-23T18:31:56Z | null | 2023-01-23T18:31:56Z |
Circleci project setup | diff --git a/.circleci/config.yml b/.circleci/config.yml
index 6133037bf3b7d..6554e1f4e8c08 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,22 +1,26 @@
-version: 2.1
-
-jobs:
- test-arm:
- machine:
- image: ubuntu-2004:202101-01
- resource_class: arm.large
- environment:
- ENV_FILE: ci/deps/circle-38-arm64.yaml
- PYTEST_WORKERS: auto
- PATTERN: "not single_cpu and not slow and not network and not clipboard and not arm_slow and not db"
- PYTEST_TARGET: "pandas"
- PANDAS_CI: "1"
- steps:
- - checkout
- - run: .circleci/setup_env.sh
- - run: PATH=$HOME/miniconda3/envs/pandas-dev/bin:$HOME/miniconda3/condabin:$PATH ci/run_tests.sh
-
-workflows:
- test:
- jobs:
- - test-arm
+# Use the latest 2.1 version of CircleCI pipeline process engine.
+# See: https://circleci.com/docs/2.0/configuration-reference
+version: 2.1
+
+# Define a job to be invoked later in a workflow.
+# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
+jobs:
+ say-hello:
+ # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
+ # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
+ docker:
+ - image: cimg/base:stable
+ # Add steps to the job
+ # See: https://circleci.com/docs/2.0/configuration-reference/#steps
+ steps:
+ - checkout
+ - run:
+ name: "Say hello"
+ command: "echo Hello, World!"
+
+# Invoke jobs via workflows
+# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
+workflows:
+ say-hello-workflow:
+ jobs:
+ - say-hello
| - [x] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50919 | 2023-01-21T09:55:10Z | 2023-01-23T18:31:36Z | null | 2023-01-23T18:31:37Z |
ENH: Optimize replace to avoid copying when not necessary | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 1aba48371b430..7ff2bde39beb1 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -552,6 +552,7 @@ def replace(
inplace: bool = False,
# mask may be pre-computed if we're called from replace_list
mask: npt.NDArray[np.bool_] | None = None,
+ using_cow: bool = False,
) -> list[Block]:
"""
replace the to_replace value with value, possible to create new
@@ -566,7 +567,12 @@ def replace(
if isinstance(values, Categorical):
# TODO: avoid special-casing
# GH49404
- blk = self if inplace else self.copy()
+ if using_cow and (self.refs.has_reference() or not inplace):
+ blk = self.copy()
+ elif using_cow:
+ blk = self.copy(deep=False)
+ else:
+ blk = self if inplace else self.copy()
values = cast(Categorical, blk.values)
values._replace(to_replace=to_replace, value=value, inplace=True)
return [blk]
@@ -576,22 +582,36 @@ def replace(
# replacing it is a no-op.
# Note: If to_replace were a list, NDFrame.replace would call
# replace_list instead of replace.
- return [self] if inplace else [self.copy()]
+ if using_cow:
+ return [self.copy(deep=False)]
+ else:
+ return [self] if inplace else [self.copy()]
if mask is None:
mask = missing.mask_missing(values, to_replace)
if not mask.any():
# Note: we get here with test_replace_extension_other incorrectly
# bc _can_hold_element is incorrect.
- return [self] if inplace else [self.copy()]
+ if using_cow:
+ return [self.copy(deep=False)]
+ else:
+ return [self] if inplace else [self.copy()]
elif self._can_hold_element(value):
- blk = self if inplace else self.copy()
+ # TODO(CoW): Maybe split here as well into columns where mask has True
+ # and rest?
+ if using_cow:
+ if inplace:
+ blk = self.copy(deep=self.refs.has_reference())
+ else:
+ blk = self.copy()
+ else:
+ blk = self if inplace else self.copy()
putmask_inplace(blk.values, mask, value)
if not (self.is_object and value is None):
# if the user *explicitly* gave None, we keep None, otherwise
# may downcast to NaN
- blocks = blk.convert(copy=False)
+ blocks = blk.convert(copy=False, using_cow=using_cow)
else:
blocks = [blk]
return blocks
@@ -619,6 +639,7 @@ def replace(
value=value,
inplace=True,
mask=mask[i : i + 1],
+ using_cow=using_cow,
)
)
return blocks
@@ -797,7 +818,10 @@ def _replace_coerce(
return [nb]
return [self] if inplace else [self.copy()]
return self.replace(
- to_replace=to_replace, value=value, inplace=inplace, mask=mask
+ to_replace=to_replace,
+ value=value,
+ inplace=inplace,
+ mask=mask,
)
# ---------------------------------------------------------------------
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index cb32b3bbc6cc7..9ecb77ad782b4 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -459,7 +459,11 @@ def replace(self: T, to_replace, value, inplace: bool) -> T:
assert not is_list_like(to_replace)
assert not is_list_like(value)
return self.apply(
- "replace", to_replace=to_replace, value=value, inplace=inplace
+ "replace",
+ to_replace=to_replace,
+ value=value,
+ inplace=inplace,
+ using_cow=using_copy_on_write(),
)
def replace_regex(self, **kwargs):
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index c88210dec3c09..7042d6e4f9478 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -1210,44 +1210,6 @@ def test_items(using_copy_on_write):
assert df.loc[0, name] == 0
-@pytest.mark.parametrize(
- "replace_kwargs",
- [
- {"to_replace": {"a": 1, "b": 4}, "value": -1},
- # Test CoW splits blocks to avoid copying unchanged columns
- {"to_replace": {"a": 1}, "value": -1},
- {"to_replace": {"b": 4}, "value": -1},
- {"to_replace": {"b": {4: 1}}},
- # TODO: Add these in a further optimization
- # We would need to see which columns got replaced in the mask
- # which could be expensive
- # {"to_replace": {"b": 1}},
- # 1
- ],
-)
-def test_replace(using_copy_on_write, replace_kwargs):
- df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": ["foo", "bar", "baz"]})
- df_orig = df.copy()
-
- df_replaced = df.replace(**replace_kwargs)
-
- if using_copy_on_write:
- if (df_replaced["b"] == df["b"]).all():
- assert np.shares_memory(get_array(df_replaced, "b"), get_array(df, "b"))
- assert np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c"))
-
- # mutating squeezed df triggers a copy-on-write for that column/block
- df_replaced.loc[0, "c"] = -1
- if using_copy_on_write:
- assert not np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c"))
-
- if "a" in replace_kwargs["to_replace"]:
- arr = get_array(df_replaced, "a")
- df_replaced.loc[0, "a"] = 100
- assert np.shares_memory(get_array(df_replaced, "a"), arr)
- tm.assert_frame_equal(df, df_orig)
-
-
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
def test_putmask(using_copy_on_write, dtype):
df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype)
diff --git a/pandas/tests/copy_view/test_replace.py b/pandas/tests/copy_view/test_replace.py
index de7278dca06ff..7cd197541ac33 100644
--- a/pandas/tests/copy_view/test_replace.py
+++ b/pandas/tests/copy_view/test_replace.py
@@ -9,34 +9,194 @@
from pandas.tests.copy_view.util import get_array
-def test_replace_categorical_inplace_reference(using_copy_on_write):
- df = DataFrame({"a": Categorical([1, 2, 3])})
+@pytest.mark.parametrize(
+ "replace_kwargs",
+ [
+ {"to_replace": {"a": 1, "b": 4}, "value": -1},
+ # Test CoW splits blocks to avoid copying unchanged columns
+ {"to_replace": {"a": 1}, "value": -1},
+ {"to_replace": {"b": 4}, "value": -1},
+ {"to_replace": {"b": {4: 1}}},
+ # TODO: Add these in a further optimization
+ # We would need to see which columns got replaced in the mask
+ # which could be expensive
+ # {"to_replace": {"b": 1}},
+ # 1
+ ],
+)
+def test_replace(using_copy_on_write, replace_kwargs):
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": ["foo", "bar", "baz"]})
+ df_orig = df.copy()
+
+ df_replaced = df.replace(**replace_kwargs)
+
+ if using_copy_on_write:
+ if (df_replaced["b"] == df["b"]).all():
+ assert np.shares_memory(get_array(df_replaced, "b"), get_array(df, "b"))
+ assert np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c"))
+
+ # mutating squeezed df triggers a copy-on-write for that column/block
+ df_replaced.loc[0, "c"] = -1
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c"))
+
+ if "a" in replace_kwargs["to_replace"]:
+ arr = get_array(df_replaced, "a")
+ df_replaced.loc[0, "a"] = 100
+ assert np.shares_memory(get_array(df_replaced, "a"), arr)
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_replace_mask_all_false_second_block(using_copy_on_write):
+ df = DataFrame({"a": [1.5, 2, 3], "b": 100.5, "c": 1, "d": 2})
+ df_orig = df.copy()
+
+ df2 = df.replace(to_replace=1.5, value=55.5)
+
+ if using_copy_on_write:
+ # TODO: Block splitting would allow us to avoid copying b
+ assert np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+
+ else:
+ assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+
+ df2.loc[0, "c"] = 1
+ tm.assert_frame_equal(df, df_orig) # Original is unchanged
+
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
+ # TODO: This should split and not copy the whole block
+ # assert np.shares_memory(get_array(df, "d"), get_array(df2, "d"))
+
+
+def test_replace_coerce_single_column(using_copy_on_write, using_array_manager):
+ df = DataFrame({"a": [1.5, 2, 3], "b": 100.5})
df_orig = df.copy()
+
+ df2 = df.replace(to_replace=1.5, value="a")
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+
+ elif not using_array_manager:
+ assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+
+ if using_copy_on_write:
+ df2.loc[0, "b"] = 0.5
+ tm.assert_frame_equal(df, df_orig) # Original is unchanged
+ assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
+
+
+def test_replace_to_replace_wrong_dtype(using_copy_on_write):
+ df = DataFrame({"a": [1.5, 2, 3], "b": 100.5})
+ df_orig = df.copy()
+
+ df2 = df.replace(to_replace="xxx", value=1.5)
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
+ assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+
+ else:
+ assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
+ assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+
+ df2.loc[0, "b"] = 0.5
+ tm.assert_frame_equal(df, df_orig) # Original is unchanged
+
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
+
+
+def test_replace_inplace(using_copy_on_write):
+ df = DataFrame({"a": [1.5, 2, 3]})
+ arr_a = get_array(df, "a")
+ df.replace(to_replace=1.5, value=15.5, inplace=True)
+
+ assert np.shares_memory(get_array(df, "a"), arr_a)
+ if using_copy_on_write:
+ assert df._mgr._has_no_reference(0)
+
+
+@pytest.mark.parametrize("to_replace", [1.5, [1.5]])
+def test_replace_inplace_reference(using_copy_on_write, to_replace):
+ df = DataFrame({"a": [1.5, 2, 3]})
arr_a = get_array(df, "a")
view = df[:]
- df.replace(to_replace=[1], value=2, inplace=True)
+ df.replace(to_replace=to_replace, value=15.5, inplace=True)
if using_copy_on_write:
- assert not np.shares_memory(get_array(df, "a").codes, arr_a.codes)
+ assert not np.shares_memory(get_array(df, "a"), arr_a)
assert df._mgr._has_no_reference(0)
assert view._mgr._has_no_reference(0)
- tm.assert_frame_equal(view, df_orig)
else:
- assert np.shares_memory(get_array(df, "a").codes, arr_a.codes)
+ assert np.shares_memory(get_array(df, "a"), arr_a)
-def test_replace_inplace_reference(using_copy_on_write):
+@pytest.mark.parametrize("to_replace", ["a", 100.5])
+def test_replace_inplace_reference_no_op(using_copy_on_write, to_replace):
df = DataFrame({"a": [1.5, 2, 3]})
arr_a = get_array(df, "a")
view = df[:]
- df.replace(to_replace=[1.5], value=15.5, inplace=True)
+ df.replace(to_replace=to_replace, value=15.5, inplace=True)
+ assert np.shares_memory(get_array(df, "a"), arr_a)
if using_copy_on_write:
- assert not np.shares_memory(get_array(df, "a"), arr_a)
+ assert not df._mgr._has_no_reference(0)
+ assert not view._mgr._has_no_reference(0)
+
+
+@pytest.mark.parametrize("to_replace", [1, [1]])
+@pytest.mark.parametrize("val", [1, 1.5])
+def test_replace_categorical_inplace_reference(using_copy_on_write, val, to_replace):
+ df = DataFrame({"a": Categorical([1, 2, 3])})
+ df_orig = df.copy()
+ arr_a = get_array(df, "a")
+ view = df[:]
+ df.replace(to_replace=to_replace, value=val, inplace=True)
+
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df, "a").codes, arr_a.codes)
assert df._mgr._has_no_reference(0)
assert view._mgr._has_no_reference(0)
+ tm.assert_frame_equal(view, df_orig)
else:
- assert np.shares_memory(get_array(df, "a"), arr_a)
+ assert np.shares_memory(get_array(df, "a").codes, arr_a.codes)
+
+
+@pytest.mark.parametrize("val", [1, 1.5])
+def test_replace_categorical_inplace(using_copy_on_write, val):
+ df = DataFrame({"a": Categorical([1, 2, 3])})
+ arr_a = get_array(df, "a")
+ df.replace(to_replace=1, value=val, inplace=True)
+
+ assert np.shares_memory(get_array(df, "a").codes, arr_a.codes)
+ if using_copy_on_write:
+ assert df._mgr._has_no_reference(0)
+
+ expected = DataFrame({"a": Categorical([val, 2, 3])})
+ tm.assert_frame_equal(df, expected)
+
+
+@pytest.mark.parametrize("val", [1, 1.5])
+def test_replace_categorical(using_copy_on_write, val):
+ df = DataFrame({"a": Categorical([1, 2, 3])})
+ df_orig = df.copy()
+ df2 = df.replace(to_replace=1, value=val)
+
+ if using_copy_on_write:
+ assert df._mgr._has_no_reference(0)
+ assert df2._mgr._has_no_reference(0)
+ assert not np.shares_memory(get_array(df, "a").codes, get_array(df2, "a").codes)
+ tm.assert_frame_equal(df, df_orig)
+
+ arr_a = get_array(df2, "a").codes
+ df2.iloc[0, 0] = 2.0
+ assert np.shares_memory(get_array(df2, "a").codes, arr_a)
@pytest.mark.parametrize("method", ["where", "mask"])
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50918 | 2023-01-21T01:26:05Z | 2023-02-26T18:42:27Z | 2023-02-26T18:42:27Z | 2023-02-26T20:19:05Z |
ENH: Make shallow copy for align nocopy with CoW | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 998c57b66509d..ce123c704ba33 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5429,6 +5429,8 @@ def _reindex_with_indexers(
if (copy or copy is None) and new_data is self._mgr:
new_data = new_data.copy(deep=copy)
+ elif using_copy_on_write() and new_data is self._mgr:
+ new_data = new_data.copy(deep=copy)
return self._constructor(new_data).__finalize__(self)
@@ -9469,6 +9471,7 @@ def _align_series(
limit=None,
fill_axis: Axis = 0,
):
+ uses_cow = using_copy_on_write()
is_series = isinstance(self, ABCSeries)
@@ -9492,7 +9495,10 @@ def _align_series(
if is_series:
left = self._reindex_indexer(join_index, lidx, copy)
elif lidx is None or join_index is None:
- left = self.copy(deep=copy) if copy or copy is None else self
+ if uses_cow:
+ left = self.copy(deep=copy)
+ else:
+ left = self.copy(deep=copy) if copy or copy is None else self
else:
left = self._constructor(
self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy)
@@ -9521,7 +9527,10 @@ def _align_series(
left = self._constructor(fdata)
if ridx is None:
- right = other.copy(deep=copy) if copy or copy is None else other
+ if uses_cow:
+ right = other.copy(deep=copy)
+ else:
+ right = other.copy(deep=copy) if copy or copy is None else other
else:
right = other.reindex(join_index, level=level)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index c6ba217042353..e2fc75dda02a5 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4643,6 +4643,8 @@ def _reindex_indexer(
if indexer is None and (
new_index is None or new_index.names == self.index.names
):
+ if using_copy_on_write():
+ return self.copy(deep=copy)
if copy or copy is None:
return self.copy(deep=copy)
return self
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index 0cec5522e39cd..0051b5db1a65a 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -456,6 +456,41 @@ def test_align_series(using_copy_on_write):
tm.assert_series_equal(ser_other, ser_orig)
+def test_align_copy_false(using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+ df_orig = df.copy()
+ df2, df3 = df.align(df, copy=False)
+
+ assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
+ assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+
+ if using_copy_on_write:
+ df2.loc[0, "a"] = 0
+ tm.assert_frame_equal(df, df_orig) # Original is unchanged
+
+ df3.loc[0, "a"] = 0
+ tm.assert_frame_equal(df, df_orig) # Original is unchanged
+
+
+def test_align_with_series_copy_false(using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+ ser = Series([1, 2, 3], name="x")
+ ser_orig = ser.copy()
+ df_orig = df.copy()
+ df2, ser2 = df.align(ser, copy=False, axis=0)
+
+ assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
+ assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+ assert np.shares_memory(get_array(ser, "x"), get_array(ser2, "x"))
+
+ if using_copy_on_write:
+ df2.loc[0, "a"] = 0
+ tm.assert_frame_equal(df, df_orig) # Original is unchanged
+
+ ser2.loc[0] = 0
+ tm.assert_series_equal(ser, ser_orig) # Original is unchanged
+
+
def test_to_frame(using_copy_on_write):
# Case: converting a Series to a DataFrame with to_frame
ser = Series([1, 2, 3])
diff --git a/pandas/tests/frame/methods/test_align.py b/pandas/tests/frame/methods/test_align.py
index 88963dcc4b0f7..d4883fd854e07 100644
--- a/pandas/tests/frame/methods/test_align.py
+++ b/pandas/tests/frame/methods/test_align.py
@@ -40,12 +40,15 @@ def test_frame_align_aware(self):
assert new1.index.tz is timezone.utc
assert new2.index.tz is timezone.utc
- def test_align_float(self, float_frame):
+ def test_align_float(self, float_frame, using_copy_on_write):
af, bf = float_frame.align(float_frame)
assert af._mgr is not float_frame._mgr
af, bf = float_frame.align(float_frame, copy=False)
- assert af._mgr is float_frame._mgr
+ if not using_copy_on_write:
+ assert af._mgr is float_frame._mgr
+ else:
+ assert af._mgr is not float_frame._mgr
# axis = 0
other = float_frame.iloc[:-5, :3]
diff --git a/pandas/tests/series/methods/test_align.py b/pandas/tests/series/methods/test_align.py
index f3fc46e1e39af..b2e03684bc902 100644
--- a/pandas/tests/series/methods/test_align.py
+++ b/pandas/tests/series/methods/test_align.py
@@ -82,7 +82,7 @@ def test_align_fill_method(
tm.assert_series_equal(ab, eb)
-def test_align_nocopy(datetime_series):
+def test_align_nocopy(datetime_series, using_copy_on_write):
b = datetime_series[:5].copy()
# do copy
@@ -95,7 +95,10 @@ def test_align_nocopy(datetime_series):
a = datetime_series.copy()
ra, _ = a.align(b, join="left", copy=False)
ra[:5] = 5
- assert (a[:5] == 5).all()
+ if using_copy_on_write:
+ assert not (a[:5] == 5).any()
+ else:
+ assert (a[:5] == 5).all()
# do copy
a = datetime_series.copy()
@@ -109,7 +112,10 @@ def test_align_nocopy(datetime_series):
b = datetime_series[:5].copy()
_, rb = a.align(b, join="right", copy=False)
rb[:2] = 5
- assert (b[:2] == 5).all()
+ if using_copy_on_write:
+ assert not (b[:2] == 5).any()
+ else:
+ assert (b[:2] == 5).all()
def test_align_same_index(datetime_series):
| xref https://github.com/pandas-dev/pandas/issues/49473
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50917 | 2023-01-21T01:25:28Z | 2023-01-27T23:37:39Z | 2023-01-27T23:37:39Z | 2023-01-27T23:37:43Z |
ENH: Add test for asfreq CoW when doing noop | diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index 0cec5522e39cd..a98e2df7a7db1 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -1117,6 +1117,26 @@ def test_putmask(using_copy_on_write):
assert view.iloc[0, 0] == 5
+def test_asfreq_noop(using_copy_on_write):
+ df = DataFrame(
+ {"a": [0.0, None, 2.0, 3.0]},
+ index=date_range("1/1/2000", periods=4, freq="T"),
+ )
+ df_orig = df.copy()
+ df2 = df.asfreq(freq="T")
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ else:
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+
+ # mutating df2 triggers a copy-on-write for that column / block
+ df2.iloc[0, 0] = 0
+
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ tm.assert_frame_equal(df, df_orig)
+
+
def test_isetitem(using_copy_on_write):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
df_orig = df.copy()
| xref https://github.com/pandas-dev/pandas/issues/49473
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50916 | 2023-01-21T01:24:20Z | 2023-01-24T07:59:44Z | 2023-01-24T07:59:44Z | 2023-01-24T13:34:27Z |
CLN: Use np.random.RandomState instead of tm.RNGContext | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index eb25566e7983e..ce33c4c1e8457 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -99,7 +99,6 @@
get_obj,
)
from pandas._testing.contexts import (
- RNGContext,
decompress_file,
ensure_clean,
ensure_safe_environment_variables,
@@ -1125,7 +1124,6 @@ def shares_memory(left, right) -> bool:
"raise_assert_detail",
"rands",
"reset_display_options",
- "RNGContext",
"raises_chained_assignment_error",
"round_trip_localpath",
"round_trip_pathlib",
diff --git a/pandas/_testing/contexts.py b/pandas/_testing/contexts.py
index d0de085788782..1a6f78171bcef 100644
--- a/pandas/_testing/contexts.py
+++ b/pandas/_testing/contexts.py
@@ -4,7 +4,6 @@
import os
from pathlib import Path
import tempfile
-from types import TracebackType
from typing import (
IO,
Any,
@@ -12,8 +11,6 @@
)
import uuid
-import numpy as np
-
from pandas.compat import PYPY
from pandas.errors import ChainedAssignmentError
@@ -198,40 +195,6 @@ def use_numexpr(use, min_elements=None) -> Generator[None, None, None]:
set_option("compute.use_numexpr", olduse)
-class RNGContext:
- """
- Context manager to set the numpy random number generator speed. Returns
- to the original value upon exiting the context manager.
-
- Parameters
- ----------
- seed : int
- Seed for numpy.random.seed
-
- Examples
- --------
- with RNGContext(42):
- np.random.randn()
- """
-
- def __init__(self, seed) -> None:
- self.seed = seed
-
- def __enter__(self) -> None:
-
- self.start_state = np.random.get_state()
- np.random.seed(self.seed)
-
- def __exit__(
- self,
- exc_type: type[BaseException] | None,
- exc_value: BaseException | None,
- traceback: TracebackType | None,
- ) -> None:
-
- np.random.set_state(self.start_state)
-
-
def raises_chained_assignment_error():
if PYPY:
diff --git a/pandas/tests/plotting/conftest.py b/pandas/tests/plotting/conftest.py
index b88d9344da707..14c413f96c4ba 100644
--- a/pandas/tests/plotting/conftest.py
+++ b/pandas/tests/plotting/conftest.py
@@ -5,31 +5,30 @@
DataFrame,
to_datetime,
)
-import pandas._testing as tm
@pytest.fixture
def hist_df():
n = 100
- with tm.RNGContext(42):
- gender = np.random.choice(["Male", "Female"], size=n)
- classroom = np.random.choice(["A", "B", "C"], size=n)
+ np_random = np.random.RandomState(42)
+ gender = np_random.choice(["Male", "Female"], size=n)
+ classroom = np_random.choice(["A", "B", "C"], size=n)
- hist_df = DataFrame(
- {
- "gender": gender,
- "classroom": classroom,
- "height": np.random.normal(66, 4, size=n),
- "weight": np.random.normal(161, 32, size=n),
- "category": np.random.randint(4, size=n),
- "datetime": to_datetime(
- np.random.randint(
- 812419200000000000,
- 819331200000000000,
- size=n,
- dtype=np.int64,
- )
- ),
- }
- )
+ hist_df = DataFrame(
+ {
+ "gender": gender,
+ "classroom": classroom,
+ "height": np.random.normal(66, 4, size=n),
+ "weight": np.random.normal(161, 32, size=n),
+ "category": np.random.randint(4, size=n),
+ "datetime": to_datetime(
+ np.random.randint(
+ 812419200000000000,
+ 819331200000000000,
+ size=n,
+ dtype=np.int64,
+ )
+ ),
+ }
+ )
return hist_df
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index a32accb11a987..cc45b7eda7e27 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -366,51 +366,51 @@ def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
@pytest.mark.parametrize("kind", ["line", "area"])
def test_line_area_stacked(self, kind):
- with tm.RNGContext(42):
- df = DataFrame(np.random.rand(6, 4), columns=["w", "x", "y", "z"])
- neg_df = -df
- # each column has either positive or negative value
- sep_df = DataFrame(
- {
- "w": np.random.rand(6),
- "x": np.random.rand(6),
- "y": -np.random.rand(6),
- "z": -np.random.rand(6),
- }
- )
- # each column has positive-negative mixed value
- mixed_df = DataFrame(
- np.random.randn(6, 4),
- index=list(string.ascii_letters[:6]),
- columns=["w", "x", "y", "z"],
- )
+ np_random = np.random.RandomState(42)
+ df = DataFrame(np_random.rand(6, 4), columns=["w", "x", "y", "z"])
+ neg_df = -df
+ # each column has either positive or negative value
+ sep_df = DataFrame(
+ {
+ "w": np_random.rand(6),
+ "x": np_random.rand(6),
+ "y": -np_random.rand(6),
+ "z": -np_random.rand(6),
+ }
+ )
+ # each column has positive-negative mixed value
+ mixed_df = DataFrame(
+ np_random.randn(6, 4),
+ index=list(string.ascii_letters[:6]),
+ columns=["w", "x", "y", "z"],
+ )
- ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
- ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
- self._compare_stacked_y_cood(ax1.lines, ax2.lines)
+ ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
+ ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
+ self._compare_stacked_y_cood(ax1.lines, ax2.lines)
- ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
- ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
- self._compare_stacked_y_cood(ax1.lines, ax2.lines)
+ ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
+ ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
+ self._compare_stacked_y_cood(ax1.lines, ax2.lines)
- ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
- ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
- self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
- self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
+ ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
+ ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
+ self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
+ self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
- _check_plot_works(mixed_df.plot, stacked=False)
- msg = (
- "When stacked is True, each column must be either all positive or "
- "all negative. Column 'w' contains both positive and negative "
- "values"
- )
- with pytest.raises(ValueError, match=msg):
- mixed_df.plot(stacked=True)
+ _check_plot_works(mixed_df.plot, stacked=False)
+ msg = (
+ "When stacked is True, each column must be either all positive or "
+ "all negative. Column 'w' contains both positive and negative "
+ "values"
+ )
+ with pytest.raises(ValueError, match=msg):
+ mixed_df.plot(stacked=True)
- # Use an index with strictly positive values, preventing
- # matplotlib from warning about ignoring xlim
- df2 = df.set_index(df.index + 1)
- _check_plot_works(df2.plot, kind=kind, logx=True, stacked=True)
+ # Use an index with strictly positive values, preventing
+ # matplotlib from warning about ignoring xlim
+ df2 = df.set_index(df.index + 1)
+ _check_plot_works(df2.plot, kind=kind, logx=True, stacked=True)
def test_line_area_nan_df(self):
values1 = [1, 2, np.nan, 3]
@@ -1237,20 +1237,18 @@ def test_all_invalid_plot_data(self):
df.plot(kind=kind)
def test_partially_invalid_plot_data(self):
- with tm.RNGContext(42):
- df = DataFrame(np.random.randn(10, 2), dtype=object)
- df[np.random.rand(df.shape[0]) > 0.5] = "a"
- for kind in plotting.PlotAccessor._common_kinds:
- msg = "no numeric data to plot"
- with pytest.raises(TypeError, match=msg):
- df.plot(kind=kind)
-
- with tm.RNGContext(42):
- # area plot doesn't support positive/negative mixed data
- df = DataFrame(np.random.rand(10, 2), dtype=object)
- df[np.random.rand(df.shape[0]) > 0.5] = "a"
- with pytest.raises(TypeError, match="no numeric data to plot"):
- df.plot(kind="area")
+ df = DataFrame(np.random.RandomState(42).randn(10, 2), dtype=object)
+ df[np.random.rand(df.shape[0]) > 0.5] = "a"
+ for kind in plotting.PlotAccessor._common_kinds:
+ msg = "no numeric data to plot"
+ with pytest.raises(TypeError, match=msg):
+ df.plot(kind=kind)
+
+ # area plot doesn't support positive/negative mixed data
+ df = DataFrame(np.random.RandomState(42).rand(10, 2), dtype=object)
+ df[np.random.rand(df.shape[0]) > 0.5] = "a"
+ with pytest.raises(TypeError, match="no numeric data to plot"):
+ df.plot(kind="area")
def test_invalid_kind(self):
df = DataFrame(np.random.randn(10, 2))
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index ab7b2855768db..29276eba09346 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -362,8 +362,7 @@ def test_grouped_plot_fignums(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
- with tm.RNGContext(42):
- gender = np.random.choice(["male", "female"], size=n)
+ gender = np.random.RandomState(42).choice(["male", "female"], size=n)
df = DataFrame({"height": height, "weight": weight, "gender": gender})
gb = df.groupby("gender")
diff --git a/pandas/tests/plotting/test_groupby.py b/pandas/tests/plotting/test_groupby.py
index de81ad20f7370..d097e69c1415d 100644
--- a/pandas/tests/plotting/test_groupby.py
+++ b/pandas/tests/plotting/test_groupby.py
@@ -21,8 +21,7 @@ def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
- with tm.RNGContext(42):
- gender = np.random.choice(["male", "female"], size=n)
+ gender = np.random.RandomState(42).choice(["male", "female"], size=n)
weight.groupby(gender).plot()
tm.close()
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index a80476038b7f1..76211df501891 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -510,8 +510,9 @@ def test_hist_df_kwargs(self):
def test_hist_df_with_nonnumerics(self):
# GH 9853
- with tm.RNGContext(1):
- df = DataFrame(np.random.randn(10, 4), columns=["A", "B", "C", "D"])
+ df = DataFrame(
+ np.random.RandomState(42).randn(10, 4), columns=["A", "B", "C", "D"]
+ )
df["E"] = ["x", "y"] * 5
_, ax = self.plt.subplots()
ax = df.plot.hist(bins=5, ax=ax)
@@ -665,8 +666,7 @@ def test_grouped_hist_legacy2(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
- with tm.RNGContext(42):
- gender_int = np.random.choice([0, 1], size=n)
+ gender_int = np.random.RandomState(42).choice([0, 1], size=n)
df_int = DataFrame({"height": height, "weight": weight, "gender": gender_int})
gb = df_int.groupby("gender")
axes = gb.hist()
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 9f0753ce0fecd..a89956d1c14c8 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -102,8 +102,7 @@ def test_scatter_matrix_axis(self, pass_axis):
if pass_axis:
_, ax = self.plt.subplots(3, 3)
- with tm.RNGContext(42):
- df = DataFrame(np.random.randn(100, 3))
+ df = DataFrame(np.random.RandomState(42).randn(100, 3))
# we are plotting multiples on a sub-plot
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py
index c68501e3ea260..802be634192a3 100644
--- a/pandas/tests/util/test_util.py
+++ b/pandas/tests/util/test_util.py
@@ -58,18 +58,6 @@ def test_datapath(datapath):
assert result == expected
-def test_rng_context():
- import numpy as np
-
- expected0 = 1.764052345967664
- expected1 = 1.6243453636632417
-
- with tm.RNGContext(0):
- with tm.RNGContext(1):
- assert np.random.randn() == expected1
- assert np.random.randn() == expected0
-
-
def test_external_error_raised():
with tm.external_error_raised(TypeError):
raise TypeError("Should not check this error message, so it will pass")
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/50915 | 2023-01-20T23:55:14Z | 2023-01-30T20:04:47Z | 2023-01-30T20:04:47Z | 2023-01-30T20:04:52Z |
REF: standardize reso-return in parsing | diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd
index c0b0db1336d14..6c2871cd746b8 100644
--- a/pandas/_libs/tslibs/dtypes.pxd
+++ b/pandas/_libs/tslibs/dtypes.pxd
@@ -12,6 +12,7 @@ cpdef NPY_DATETIMEUNIT get_supported_reso(NPY_DATETIMEUNIT reso)
cdef dict attrname_to_abbrevs
cdef dict npy_unit_to_attrname
+cdef dict attrname_to_npy_unit
cdef enum c_FreqGroup:
# Mirrors FreqGroup in the .pyx file
diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index 928f620b5e7c6..699e8aba76dd6 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -435,3 +435,4 @@ cdef dict npy_unit_to_attrname = {
NPY_DATETIMEUNIT.NPY_FR_us: "microsecond",
NPY_DATETIMEUNIT.NPY_FR_ns: "nanosecond",
}
+cdef dict attrname_to_npy_unit = {v: k for k, v in npy_unit_to_attrname.items()}
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index aa53f8d813874..3cd6272988339 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -15,7 +15,9 @@ from cpython.datetime cimport (
timedelta,
tzinfo,
)
+
from datetime import timezone
+
from cpython.object cimport PyObject_Str
from cython cimport Py_ssize_t
from libc.string cimport strchr
@@ -52,18 +54,25 @@ from dateutil.tz import (
from pandas._config import get_option
from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS
-from pandas._libs.tslibs.dtypes cimport npy_unit_to_attrname
+from pandas._libs.tslibs.dtypes cimport (
+ attrname_to_npy_unit,
+ npy_unit_to_attrname,
+)
from pandas._libs.tslibs.nattype cimport (
c_NaT as NaT,
c_nat_strings as nat_strings,
)
+
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
+
from pandas._libs.tslibs.np_datetime cimport (
NPY_DATETIMEUNIT,
npy_datetimestruct,
string_to_dts,
)
+
from pandas._libs.tslibs.strptime import array_strptime
+
from pandas._libs.tslibs.util cimport (
get_c_string_buf_and_size,
is_array,
@@ -92,6 +101,14 @@ _DEFAULT_DATETIME = datetime(1, 1, 1).replace(hour=0, minute=0,
cdef:
set _not_datelike_strings = {"a", "A", "m", "M", "p", "P", "t", "T"}
+ # _timestamp_units -> units that we round to nanos
+ set _timestamp_units = {
+ NPY_DATETIMEUNIT.NPY_FR_ns,
+ NPY_DATETIMEUNIT.NPY_FR_ps,
+ NPY_DATETIMEUNIT.NPY_FR_fs,
+ NPY_DATETIMEUNIT.NPY_FR_as,
+ }
+
# ----------------------------------------------------------------------
cdef:
const char* delimiters = " /-."
@@ -125,7 +142,7 @@ cdef int _parse_4digit(const char* s):
cdef datetime _parse_delimited_date(
- str date_string, bint dayfirst, NPY_DATETIMEUNIT* creso
+ str date_string, bint dayfirst, NPY_DATETIMEUNIT* out_bestunit
):
"""
Parse special cases of dates: MM/DD/YYYY, DD/MM/YYYY, MM/YYYY.
@@ -144,7 +161,7 @@ cdef datetime _parse_delimited_date(
----------
date_string : str
dayfirst : bool
- creso : NPY_DATETIMEUNIT*
+ out_bestunit : NPY_DATETIMEUNIT*
For specifying identified resolution.
Returns:
@@ -163,28 +180,28 @@ cdef datetime _parse_delimited_date(
month = _parse_2digit(buf)
day = _parse_2digit(buf + 3)
year = _parse_4digit(buf + 6)
- creso[0] = NPY_DATETIMEUNIT.NPY_FR_D
+ out_bestunit[0] = NPY_DATETIMEUNIT.NPY_FR_D
can_swap = 1
elif length == 9 and _is_delimiter(buf[1]) and _is_delimiter(buf[4]):
# parsing M?DD?YYYY and D?MM?YYYY dates
month = _parse_1digit(buf)
day = _parse_2digit(buf + 2)
year = _parse_4digit(buf + 5)
- creso[0] = NPY_DATETIMEUNIT.NPY_FR_D
+ out_bestunit[0] = NPY_DATETIMEUNIT.NPY_FR_D
can_swap = 1
elif length == 9 and _is_delimiter(buf[2]) and _is_delimiter(buf[4]):
# parsing MM?D?YYYY and DD?M?YYYY dates
month = _parse_2digit(buf)
day = _parse_1digit(buf + 3)
year = _parse_4digit(buf + 5)
- creso[0] = NPY_DATETIMEUNIT.NPY_FR_D
+ out_bestunit[0] = NPY_DATETIMEUNIT.NPY_FR_D
can_swap = 1
elif length == 8 and _is_delimiter(buf[1]) and _is_delimiter(buf[3]):
# parsing M?D?YYYY and D?M?YYYY dates
month = _parse_1digit(buf)
day = _parse_1digit(buf + 2)
year = _parse_4digit(buf + 4)
- creso[0] = NPY_DATETIMEUNIT.NPY_FR_D
+ out_bestunit[0] = NPY_DATETIMEUNIT.NPY_FR_D
can_swap = 1
elif length == 7 and _is_delimiter(buf[2]):
# parsing MM?YYYY dates
@@ -194,7 +211,7 @@ cdef datetime _parse_delimited_date(
return None
month = _parse_2digit(buf)
year = _parse_4digit(buf + 3)
- creso[0] = NPY_DATETIMEUNIT.NPY_FR_M
+ out_bestunit[0] = NPY_DATETIMEUNIT.NPY_FR_M
else:
return None
@@ -270,7 +287,8 @@ def parse_datetime_string(
cdef:
datetime dt
- NPY_DATETIMEUNIT creso
+ NPY_DATETIMEUNIT out_bestunit
+ bint is_quarter = 0
if not _does_string_look_like_datetime(date_string):
raise ValueError(f'Given date string "{date_string}" not likely a datetime')
@@ -281,21 +299,23 @@ def parse_datetime_string(
yearfirst=yearfirst)
return dt
- dt = _parse_delimited_date(date_string, dayfirst, &creso)
+ dt = _parse_delimited_date(date_string, dayfirst, &out_bestunit)
if dt is not None:
return dt
try:
- dt, _ = _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq=None)
+ dt = _parse_dateabbr_string(
+ date_string, _DEFAULT_DATETIME, None, &out_bestunit, &is_quarter
+ )
return dt
except DateParseError:
raise
except ValueError:
pass
- dt, _ = dateutil_parse(date_string, default=_DEFAULT_DATETIME,
- dayfirst=dayfirst, yearfirst=yearfirst,
- ignoretz=False)
+ dt = dateutil_parse(date_string, default=_DEFAULT_DATETIME,
+ dayfirst=dayfirst, yearfirst=yearfirst,
+ ignoretz=False, out_bestunit=&out_bestunit)
if dt.tzinfo is not None:
# dateutil can return a datetime with a tzoffset outside of (-24H, 24H)
@@ -361,26 +381,24 @@ def parse_datetime_string_with_reso(
int out_local = 0
int out_tzoffset
tzinfo tz
+ bint is_quarter = 0
if not _does_string_look_like_datetime(date_string):
raise ValueError(f'Given date string "{date_string}" not likely a datetime')
- parsed = _parse_delimited_date(date_string, dayfirst, &out_bestunit)
- if parsed is not None:
- reso = npy_unit_to_attrname[out_bestunit]
- return parsed, reso
-
# Try iso8601 first, as it handles nanoseconds
string_to_dts_failed = string_to_dts(
date_string, &dts, &out_bestunit, &out_local,
&out_tzoffset, False
)
if not string_to_dts_failed:
- timestamp_units = {NPY_DATETIMEUNIT.NPY_FR_ns,
- NPY_DATETIMEUNIT.NPY_FR_ps,
- NPY_DATETIMEUNIT.NPY_FR_fs,
- NPY_DATETIMEUNIT.NPY_FR_as}
- if out_bestunit in timestamp_units:
+ # Match Timestamp and drop picoseconds, femtoseconds, attoseconds
+ # The new resolution will just be nano
+ # GH#50417
+ if out_bestunit in _timestamp_units:
+ out_bestunit = NPY_DATETIMEUNIT.NPY_FR_ns
+
+ if out_bestunit == NPY_DATETIMEUNIT.NPY_FR_ns:
# TODO: avoid circular import
from pandas import Timestamp
parsed = Timestamp(date_string)
@@ -392,25 +410,34 @@ def parse_datetime_string_with_reso(
parsed = datetime_new(
dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, tz
)
- # Match Timestamp and drop picoseconds, femtoseconds, attoseconds
- # The new resolution will just be nano
- # GH 50417
- if out_bestunit in timestamp_units:
- out_bestunit = NPY_DATETIMEUNIT.NPY_FR_ns
reso = npy_unit_to_attrname[out_bestunit]
return parsed, reso
+ parsed = _parse_delimited_date(date_string, dayfirst, &out_bestunit)
+ if parsed is not None:
+ reso = npy_unit_to_attrname[out_bestunit]
+ return parsed, reso
+
try:
- return _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq)
+ parsed = _parse_dateabbr_string(
+ date_string, _DEFAULT_DATETIME, freq, &out_bestunit, &is_quarter
+ )
except DateParseError:
raise
except ValueError:
pass
+ else:
+ if is_quarter:
+ reso = "quarter"
+ else:
+ reso = npy_unit_to_attrname[out_bestunit]
+ return parsed, reso
- parsed, reso = dateutil_parse(date_string, _DEFAULT_DATETIME,
- dayfirst=dayfirst, yearfirst=yearfirst,
- ignoretz=False)
+ parsed = dateutil_parse(date_string, _DEFAULT_DATETIME,
+ dayfirst=dayfirst, yearfirst=yearfirst,
+ ignoretz=False, out_bestunit=&out_bestunit)
+ reso = npy_unit_to_attrname[out_bestunit]
return parsed, reso
@@ -461,8 +488,9 @@ cpdef bint _does_string_look_like_datetime(str py_string):
return True
-cdef object _parse_dateabbr_string(str date_string, datetime default,
- str freq=None):
+cdef datetime _parse_dateabbr_string(str date_string, datetime default,
+ str freq, NPY_DATETIMEUNIT* out_bestunit,
+ bint* is_quarter):
# special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1
cdef:
datetime ret
@@ -472,7 +500,9 @@ cdef object _parse_dateabbr_string(str date_string, datetime default,
const char* buf
if date_string in nat_strings:
- return NaT, ""
+ # default to nanos, could also reasonably do NPY_FR_GENERIC
+ out_bestunit[0] = NPY_DATETIMEUNIT.NPY_FR_ns
+ return NaT
date_string = date_string.upper()
date_len = len(date_string)
@@ -481,7 +511,8 @@ cdef object _parse_dateabbr_string(str date_string, datetime default,
# parse year only like 2000
try:
ret = default.replace(year=int(date_string))
- return ret, "year"
+ out_bestunit[0] = NPY_DATETIMEUNIT.NPY_FR_Y
+ return ret
except ValueError:
pass
@@ -534,7 +565,10 @@ cdef object _parse_dateabbr_string(str date_string, datetime default,
f"freq: {freq}")
ret = default.replace(year=year, month=month)
- return ret, "quarter"
+ # Monthly is as close as we can get to a non-existent NPY_FR_Q
+ out_bestunit[0] = NPY_DATETIMEUNIT.NPY_FR_M
+ is_quarter[0] = 1
+ return ret
except DateParseError:
raise
@@ -547,7 +581,8 @@ cdef object _parse_dateabbr_string(str date_string, datetime default,
month = int(date_string[4:6])
try:
ret = default.replace(year=year, month=month)
- return ret, "month"
+ out_bestunit[0] = NPY_DATETIMEUNIT.NPY_FR_M
+ return ret
except ValueError as err:
# We can infer that none of the patterns below will match
raise ValueError(f"Unable to parse {date_string}") from err
@@ -555,7 +590,8 @@ cdef object _parse_dateabbr_string(str date_string, datetime default,
for pat in ["%Y-%m", "%b %Y", "%b-%Y"]:
try:
ret = datetime.strptime(date_string, pat)
- return ret, "month"
+ out_bestunit[0] = NPY_DATETIMEUNIT.NPY_FR_M
+ return ret
except ValueError:
pass
@@ -597,12 +633,13 @@ cpdef quarter_to_myear(int year, int quarter, str freq):
return year, month
-cdef dateutil_parse(
+cdef datetime dateutil_parse(
str timestr,
datetime default,
- bint ignoretz=False,
- bint dayfirst=False,
- bint yearfirst=False,
+ bint ignoretz,
+ bint dayfirst,
+ bint yearfirst,
+ NPY_DATETIMEUNIT* out_bestunit
):
""" lifted from dateutil to get resolution"""
@@ -658,7 +695,9 @@ cdef dateutil_parse(
ret = ret.replace(tzinfo=_dateutil_tzutc())
elif res.tzoffset:
ret = ret.replace(tzinfo=tzoffset(res.tzname, res.tzoffset))
- return ret, reso
+
+ out_bestunit[0] = attrname_to_npy_unit[reso]
+ return ret
# ----------------------------------------------------------------------
| Moving towards getting reso inference working in the remaining cases in convert_string_to_tsobject, and hopefully de-duplicating parse_datetime_string vs parse_datetime_string_with_reso | https://api.github.com/repos/pandas-dev/pandas/pulls/50914 | 2023-01-20T23:40:18Z | 2023-01-25T23:15:29Z | 2023-01-25T23:15:29Z | 2023-01-25T23:19:35Z |
REF: Define methods on resample non-dynamically | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index c15948ce877a8..b0f8a0b7a96af 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2004,8 +2004,6 @@ def mean(
return result.__finalize__(self.obj, method="groupby")
@final
- @Substitution(name="groupby")
- @Appender(_common_see_also)
def median(self, numeric_only: bool = False):
"""
Compute median of groups, excluding missing values.
@@ -2315,8 +2313,6 @@ def _value_counts(
return result.__finalize__(self.obj, method="value_counts")
@final
- @Substitution(name="groupby")
- @Appender(_common_see_also)
def sem(self, ddof: int = 1, numeric_only: bool = False):
"""
Compute standard error of the mean of groups, excluding missing values.
@@ -2471,7 +2467,6 @@ def max(
)
@final
- @Substitution(name="groupby")
def first(self, numeric_only: bool = False, min_count: int = -1):
"""
Compute the first non-null entry of each column.
@@ -2542,7 +2537,6 @@ def first(x: Series):
)
@final
- @Substitution(name="groupby")
def last(self, numeric_only: bool = False, min_count: int = -1):
"""
Compute the last non-null entry of each column.
@@ -2602,8 +2596,6 @@ def last(x: Series):
)
@final
- @Substitution(name="groupby")
- @Appender(_common_see_also)
def ohlc(self) -> DataFrame:
"""
Compute open, high, low and close values of a group, excluding missing values.
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 907d6522d3236..ac303e4b1f0bf 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -895,6 +895,74 @@ def asfreq(self, fill_value=None):
"""
return self._upsample("asfreq", fill_value=fill_value)
+ def sum(
+ self,
+ numeric_only: bool = False,
+ min_count: int = 0,
+ *args,
+ **kwargs,
+ ):
+ nv.validate_resampler_func("sum", args, kwargs)
+ return self._downsample("sum", numeric_only=numeric_only, min_count=min_count)
+
+ @doc(GroupBy.prod)
+ def prod(
+ self,
+ numeric_only: bool = False,
+ min_count: int = 0,
+ *args,
+ **kwargs,
+ ):
+ nv.validate_resampler_func("prod", args, kwargs)
+ return self._downsample("prod", numeric_only=numeric_only, min_count=min_count)
+
+ def min(
+ self,
+ numeric_only: bool = False,
+ min_count: int = 0,
+ *args,
+ **kwargs,
+ ):
+ nv.validate_resampler_func("min", args, kwargs)
+ return self._downsample("min", numeric_only=numeric_only, min_count=min_count)
+
+ def max(
+ self,
+ numeric_only: bool = False,
+ min_count: int = 0,
+ *args,
+ **kwargs,
+ ):
+ nv.validate_resampler_func("max", args, kwargs)
+ return self._downsample("max", numeric_only=numeric_only, min_count=min_count)
+
+ @doc(GroupBy.first)
+ def first(
+ self,
+ numeric_only: bool = False,
+ min_count: int = 0,
+ *args,
+ **kwargs,
+ ):
+ nv.validate_resampler_func("first", args, kwargs)
+ return self._downsample("first", numeric_only=numeric_only, min_count=min_count)
+
+ @doc(GroupBy.last)
+ def last(
+ self,
+ numeric_only: bool = False,
+ min_count: int = 0,
+ *args,
+ **kwargs,
+ ):
+ nv.validate_resampler_func("last", args, kwargs)
+ return self._downsample("last", numeric_only=numeric_only, min_count=min_count)
+
+ @doc(GroupBy.median)
+ def median(self, numeric_only: bool = False, *args, **kwargs):
+ nv.validate_resampler_func("median", args, kwargs)
+ return self._downsample("median", numeric_only=numeric_only)
+
def mean(
self,
numeric_only: bool = False,
@@ -984,6 +1052,35 @@ def var(
nv.validate_resampler_func("var", args, kwargs)
return self._downsample("var", ddof=ddof, numeric_only=numeric_only)
+ @doc(GroupBy.sem)
+ def sem(
+ self,
+ ddof: int = 1,
+ numeric_only: bool = False,
+ *args,
+ **kwargs,
+ ):
+ nv.validate_resampler_func("sem", args, kwargs)
+ return self._downsample("sem", ddof=ddof, numeric_only=numeric_only)
+
+ @doc(GroupBy.ohlc)
+ def ohlc(
+ self,
+ *args,
+ **kwargs,
+ ):
+ nv.validate_resampler_func("ohlc", args, kwargs)
+ return self._downsample("ohlc")
+
+ @doc(SeriesGroupBy.nunique)
+ def nunique(
+ self,
+ *args,
+ **kwargs,
+ ):
+ nv.validate_resampler_func("nunique", args, kwargs)
+ return self._downsample("nunique")
+
@doc(GroupBy.size)
def size(self):
result = self._downsample("size")
@@ -1047,87 +1144,6 @@ def quantile(self, q: float | AnyArrayLike = 0.5, **kwargs):
return self._downsample("quantile", q=q, **kwargs)
-def _add_downsample_kernel(
- name: str, args: tuple[str, ...], docs_class: type = GroupBy
-) -> None:
- """
- Add a kernel to Resampler.
-
- Arguments
- ---------
- name : str
- Name of the kernel.
- args : tuple
- Arguments of the method.
- docs_class : type
- Class to get kernel docstring from.
- """
- assert args in (
- ("numeric_only", "min_count"),
- ("numeric_only",),
- ("ddof", "numeric_only"),
- (),
- )
-
- # Explicitly provide args rather than args/kwargs for API docs
- if args == ("numeric_only", "min_count"):
-
- def f(
- self,
- numeric_only: bool = False,
- min_count: int = 0,
- *args,
- **kwargs,
- ):
- nv.validate_resampler_func(name, args, kwargs)
- return self._downsample(
- name, numeric_only=numeric_only, min_count=min_count
- )
-
- elif args == ("numeric_only",):
- # error: All conditional function variants must have identical signatures
- def f(self, numeric_only: bool = False, *args, **kwargs): # type: ignore[misc]
- nv.validate_resampler_func(name, args, kwargs)
- return self._downsample(name, numeric_only=numeric_only)
-
- elif args == ("ddof", "numeric_only"):
- # error: All conditional function variants must have identical signatures
- def f( # type: ignore[misc]
- self,
- ddof: int = 1,
- numeric_only: bool = False,
- *args,
- **kwargs,
- ):
- nv.validate_resampler_func(name, args, kwargs)
- return self._downsample(name, ddof=ddof, numeric_only=numeric_only)
-
- else:
- # error: All conditional function variants must have identical signatures
- def f( # type: ignore[misc]
- self,
- *args,
- **kwargs,
- ):
- nv.validate_resampler_func(name, args, kwargs)
- return self._downsample(name)
-
- f.__doc__ = getattr(docs_class, name).__doc__
- setattr(Resampler, name, f)
-
-
-for _method in ["sum", "prod", "min", "max", "first", "last"]:
- _add_downsample_kernel(_method, ("numeric_only", "min_count"))
-for _method in ["median"]:
- _add_downsample_kernel(_method, ("numeric_only",))
-for _method in ["sem"]:
- _add_downsample_kernel(_method, ("ddof", "numeric_only"))
-for _method in ["ohlc"]:
- _add_downsample_kernel(_method, ())
-for _method in ["nunique"]:
- _add_downsample_kernel(_method, (), SeriesGroupBy)
-
-
class _GroupByMixin(PandasObject):
"""
Provide the groupby facilities.
| - [ ] closes #49001 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50913 | 2023-01-20T23:15:36Z | 2023-01-25T22:44:07Z | 2023-01-25T22:44:07Z | 2023-01-25T22:44:12Z |
Changes how the latest version is taken from a list of versions and cuts out obsolete versions on the homepage | diff --git a/web/pandas_web.py b/web/pandas_web.py
index 8c508a15f9a2b..86ca309b9c2b0 100755
--- a/web/pandas_web.py
+++ b/web/pandas_web.py
@@ -36,6 +36,8 @@
import sys
import time
import typing
+from packaging import version
+from itertools import groupby
import feedparser
import jinja2
@@ -223,9 +225,20 @@ def home_add_releases(context):
with open(pathlib.Path(context["target_path"]) / "releases.json", "w") as f:
json.dump(releases, f, default=datetime.datetime.isoformat)
- for release in releases:
+ non_obsolete_releases = []
+
+ # This is necessary for the versions to be properly grouped
+ releases = sorted(releases, key=lambda release:version.parse(release["tag_name"]), reverse=True)
+
+ for _, group in groupby(releases, key=lambda release:
+ ((version.parse(release["tag_name"]).major), version.parse(release["tag_name"]).minor)):
+
+ non_obsolete_releases.append(max(group, key=lambda release: version.parse(release["tag_name"])))
+
+ for release in non_obsolete_releases:
if release["prerelease"]:
continue
+
published = datetime.datetime.strptime(
release["published_at"], "%Y-%m-%dT%H:%M:%SZ"
)
diff --git a/web/tests/test_pandas_web.py b/web/tests/test_pandas_web.py
new file mode 100644
index 0000000000000..8fbc8321b65eb
--- /dev/null
+++ b/web/tests/test_pandas_web.py
@@ -0,0 +1,22 @@
+import yaml
+import os
+# Not working in code. the pandas directory is set up for this, but not the web directory
+from pandas_web import Preprocessors
+import pytest
+
+@pytest.fixture # I think this is fine
+def test_home_releases_versions():
+ context_path = os.path.join('pandas/config.yml')
+
+ # As it is, the home_add_releases function pull data from the github release page
+ # with an http request. After I run the function, I print the data out.
+ # In order to run custom data, you would need to modify the function in some way
+ with open(context_path, 'r') as context_file:
+ context = yaml.safe_load(context_file)
+
+ context["target_path"] = "build"
+
+ Preprocessors.home_add_releases(context)
+
+ for release in context["releases"]:
+ print(release)
\ No newline at end of file
| - [ ] closes Better management of releases in the pandas home page #50885
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50912 | 2023-01-20T22:47:44Z | 2023-04-02T16:30:01Z | null | 2023-04-02T16:30:01Z |
TST: read_fwf with dtype_backend | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index bbecf3fee01f3..04c54d7ea4332 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -54,6 +54,7 @@ to select the nullable dtypes implementation.
* :func:`read_csv` (with ``engine="pyarrow"`` or ``engine="python"``)
* :func:`read_clipboard` (with ``engine="python"``)
+* :func:`read_fwf`
* :func:`read_excel`
* :func:`read_html`
* :func:`read_xml`
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 0dc8ee81278dd..f4320f6480517 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -948,24 +948,27 @@ def test_widths_and_usecols():
tm.assert_frame_equal(result, expected)
-def test_use_nullable_dtypes(string_storage):
+@pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
+def test_use_nullable_dtypes(string_storage, dtype_backend):
# GH#50289
- data = """a b c d e f g h i
-1 2.5 True a
-3 4.5 False b True 6 7.5 a"""
- with pd.option_context("mode.string_storage", string_storage):
- result = read_fwf(StringIO(data), use_nullable_dtypes=True)
+ if string_storage == "pyarrow" or dtype_backend == "pyarrow":
+ pa = pytest.importorskip("pyarrow")
if string_storage == "python":
arr = StringArray(np.array(["a", "b"], dtype=np.object_))
arr_na = StringArray(np.array([pd.NA, "a"], dtype=np.object_))
else:
- import pyarrow as pa
-
arr = ArrowStringArray(pa.array(["a", "b"]))
arr_na = ArrowStringArray(pa.array([None, "a"]))
+ data = """a b c d e f g h i
+1 2.5 True a
+3 4.5 False b True 6 7.5 a"""
+ with pd.option_context("mode.string_storage", string_storage):
+ with pd.option_context("mode.dtype_backend", dtype_backend):
+ result = read_fwf(StringIO(data), use_nullable_dtypes=True)
+
expected = DataFrame(
{
"a": pd.Series([1, 3], dtype="Int64"),
@@ -979,4 +982,15 @@ def test_use_nullable_dtypes(string_storage):
"i": pd.Series([pd.NA, pd.NA], dtype="Int64"),
}
)
+ if dtype_backend == "pyarrow":
+ from pandas.arrays import ArrowExtensionArray
+
+ expected = DataFrame(
+ {
+ col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
+ for col in expected.columns
+ }
+ )
+ expected["i"] = ArrowExtensionArray(pa.array([None, None]))
+
tm.assert_frame_equal(result, expected)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50911 | 2023-01-20T22:26:28Z | 2023-01-22T14:52:49Z | 2023-01-22T14:52:49Z | 2023-01-22T17:52:50Z |
ENH: Add dtype_backend to to_numeric | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index bbecf3fee01f3..0ceda331de790 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -61,6 +61,7 @@ to select the nullable dtypes implementation.
* :func:`read_parquet`
* :func:`read_orc`
* :func:`read_feather`
+* :func:`to_numeric`
And the following methods will also utilize the ``mode.dtype_backend`` option.
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index a8ae8c47b0d19..64bb34241d956 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -4,6 +4,8 @@
import numpy as np
+from pandas._config import get_option
+
from pandas._libs import lib
from pandas._typing import (
DateTimeErrorChoices,
@@ -190,6 +192,9 @@ def to_numeric(
values = values._data[~mask]
values_dtype = getattr(values, "dtype", None)
+ if isinstance(values_dtype, pd.ArrowDtype):
+ mask = values.isna()
+ values = values.dropna().to_numpy()
new_mask: np.ndarray | None = None
if is_numeric_dtype(values_dtype):
pass
@@ -258,6 +263,7 @@ def to_numeric(
data[~mask] = values
from pandas.core.arrays import (
+ ArrowExtensionArray,
BooleanArray,
FloatingArray,
IntegerArray,
@@ -272,6 +278,11 @@ def to_numeric(
klass = FloatingArray
values = klass(data, mask)
+ if get_option("mode.dtype_backend") == "pyarrow" or isinstance(
+ values_dtype, pd.ArrowDtype
+ ):
+ values = ArrowExtensionArray(values.__arrow_array__())
+
if is_series:
return arg._constructor(values, index=arg.index, name=arg.name)
elif is_index:
diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py
index d3701c30aa50c..a2b94883d457d 100644
--- a/pandas/tests/tools/test_to_numeric.py
+++ b/pandas/tests/tools/test_to_numeric.py
@@ -9,6 +9,7 @@
DataFrame,
Index,
Series,
+ option_context,
to_numeric,
)
import pandas._testing as tm
@@ -813,39 +814,86 @@ def test_to_numeric_use_nullable_dtypes(val, dtype):
@pytest.mark.parametrize(
- "val, dtype", [(1, "Int64"), (1.5, "Float64"), (True, "boolean")]
+ "val, dtype",
+ [
+ (1, "Int64"),
+ (1.5, "Float64"),
+ (True, "boolean"),
+ (1, "int64[pyarrow]"),
+ (1.5, "float64[pyarrow]"),
+ (True, "bool[pyarrow]"),
+ ],
)
def test_to_numeric_use_nullable_dtypes_na(val, dtype):
# GH#50505
+ if "pyarrow" in dtype:
+ pytest.importorskip("pyarrow")
+ dtype_backend = "pyarrow"
+ else:
+ dtype_backend = "pandas"
ser = Series([val, None], dtype=object)
- result = to_numeric(ser, use_nullable_dtypes=True)
+ with option_context("mode.dtype_backend", dtype_backend):
+ result = to_numeric(ser, use_nullable_dtypes=True)
expected = Series([val, pd.NA], dtype=dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"val, dtype, downcast",
- [(1, "Int8", "integer"), (1.5, "Float32", "float"), (1, "Int8", "signed")],
+ [
+ (1, "Int8", "integer"),
+ (1.5, "Float32", "float"),
+ (1, "Int8", "signed"),
+ (1, "int8[pyarrow]", "integer"),
+ (1.5, "float[pyarrow]", "float"),
+ (1, "int8[pyarrow]", "signed"),
+ ],
)
def test_to_numeric_use_nullable_dtypes_downcasting(val, dtype, downcast):
# GH#50505
+ if "pyarrow" in dtype:
+ pytest.importorskip("pyarrow")
+ dtype_backend = "pyarrow"
+ else:
+ dtype_backend = "pandas"
ser = Series([val, None], dtype=object)
- result = to_numeric(ser, use_nullable_dtypes=True, downcast=downcast)
+ with option_context("mode.dtype_backend", dtype_backend):
+ result = to_numeric(ser, use_nullable_dtypes=True, downcast=downcast)
expected = Series([val, pd.NA], dtype=dtype)
tm.assert_series_equal(result, expected)
-def test_to_numeric_use_nullable_dtypes_downcasting_uint():
+@pytest.mark.parametrize(
+ "smaller, dtype_backend", [["UInt8", "pandas"], ["uint8[pyarrow]", "pyarrow"]]
+)
+def test_to_numeric_use_nullable_dtypes_downcasting_uint(smaller, dtype_backend):
# GH#50505
+ if dtype_backend == "pyarrow":
+ pytest.importorskip("pyarrow")
ser = Series([1, pd.NA], dtype="UInt64")
- result = to_numeric(ser, use_nullable_dtypes=True, downcast="unsigned")
- expected = Series([1, pd.NA], dtype="UInt8")
+ with option_context("mode.dtype_backend", dtype_backend):
+ result = to_numeric(ser, use_nullable_dtypes=True, downcast="unsigned")
+ expected = Series([1, pd.NA], dtype=smaller)
tm.assert_series_equal(result, expected)
-@pytest.mark.parametrize("dtype", ["Int64", "UInt64", "Float64", "boolean"])
+@pytest.mark.parametrize(
+ "dtype",
+ [
+ "Int64",
+ "UInt64",
+ "Float64",
+ "boolean",
+ "int64[pyarrow]",
+ "uint64[pyarrow]",
+ "float64[pyarrow]",
+ "bool[pyarrow]",
+ ],
+)
def test_to_numeric_use_nullable_dtypes_already_nullable(dtype):
# GH#50505
+ if "pyarrow" in dtype:
+ pytest.importorskip("pyarrow")
ser = Series([1, pd.NA], dtype=dtype)
result = to_numeric(ser, use_nullable_dtypes=True)
expected = Series([1, pd.NA], dtype=dtype)
@@ -855,16 +903,30 @@ def test_to_numeric_use_nullable_dtypes_already_nullable(dtype):
@pytest.mark.parametrize(
"use_nullable_dtypes, dtype", [(True, "Float64"), (False, "float64")]
)
-def test_to_numeric_use_nullable_dtypes_error(use_nullable_dtypes, dtype):
+@pytest.mark.parametrize("dtype_backend", ["pandas", "pyarrow"])
+def test_to_numeric_use_nullable_dtypes_error(
+ use_nullable_dtypes, dtype, dtype_backend
+):
# GH#50505
+ if dtype_backend == "pyarrow":
+ pytest.importorskip("pyarrow")
ser = Series(["a", "b", ""])
expected = ser.copy()
with pytest.raises(ValueError, match="Unable to parse string"):
- to_numeric(ser, use_nullable_dtypes=use_nullable_dtypes)
+ with option_context("mode.dtype_backend", dtype_backend):
+ to_numeric(ser, use_nullable_dtypes=use_nullable_dtypes)
- result = to_numeric(ser, use_nullable_dtypes=use_nullable_dtypes, errors="ignore")
+ with option_context("mode.dtype_backend", dtype_backend):
+ result = to_numeric(
+ ser, use_nullable_dtypes=use_nullable_dtypes, errors="ignore"
+ )
tm.assert_series_equal(result, expected)
- result = to_numeric(ser, use_nullable_dtypes=use_nullable_dtypes, errors="coerce")
+ with option_context("mode.dtype_backend", dtype_backend):
+ result = to_numeric(
+ ser, use_nullable_dtypes=use_nullable_dtypes, errors="coerce"
+ )
+ if use_nullable_dtypes and dtype_backend == "pyarrow":
+ dtype = "double[pyarrow]"
expected = Series([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_series_equal(result, expected)
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/50910 | 2023-01-20T22:05:53Z | 2023-01-22T01:12:15Z | 2023-01-22T01:12:15Z | 2023-01-23T18:06:11Z |
DEPR: casting strings to float in to_datetime with unit | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index a30d68319cafe..b9ee5d5590861 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -630,6 +630,7 @@ Other API changes
Deprecations
~~~~~~~~~~~~
- Deprecated argument ``infer_datetime_format`` in :func:`to_datetime` and :func:`read_csv`, as a strict version of it is now the default (:issue:`48621`)
+- Deprecated behavior of :func:`to_datetime` with ``unit`` when parsing strings, in a future version these will be parsed as datetimes (matching unit-less behavior) instead of cast to floats. To retain the old behavior, cast strings to numeric types before calling :func:`to_datetime` (:issue:`50735`)
- Deprecated :func:`pandas.io.sql.execute` (:issue:`50185`)
- :meth:`Index.is_boolean` has been deprecated. Use :func:`pandas.api.types.is_bool_dtype` instead (:issue:`50042`)
- :meth:`Index.is_integer` has been deprecated. Use :func:`pandas.api.types.is_integer_dtype` instead (:issue:`50042`)
@@ -637,6 +638,7 @@ Deprecations
- :meth:`Index.holds_integer` has been deprecated. Use :func:`pandas.api.types.infer_dtype` instead (:issue:`50243`)
- :meth:`Index.is_categorical` has been deprecated. Use :func:`pandas.api.types.is_categorical_dtype` instead (:issue:`50042`)
- :meth:`Index.is_interval` has been deprecated. Use :func:`pandas.api.types.is_intterval_dtype` instead (:issue:`50042`)
+-
.. ---------------------------------------------------------------------------
.. _whatsnew_200.prior_deprecations:
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 2d31eeaa53a61..9d9b93f274c60 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -1,3 +1,7 @@
+import warnings
+
+from pandas.util._exceptions import find_stack_level
+
cimport cython
from datetime import timezone
@@ -303,6 +307,16 @@ def array_with_unit_to_datetime(
raise ValueError(
f"non convertible value {val} with the unit '{unit}'"
)
+ warnings.warn(
+ "The behavior of 'to_datetime' with 'unit' when parsing "
+ "strings is deprecated. In a future version, strings will "
+ "be parsed as datetime strings, matching the behavior "
+ "without a 'unit'. To retain the old behavior, explicitly "
+ "cast ints or floats to numeric type before calling "
+ "to_datetime.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
iresult[i] = cast_from_unit(fval, unit)
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index afb0be0729344..fb64e089d53a8 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -1222,7 +1222,9 @@ def _try_convert_to_date(self, data):
if new_data.dtype == "object":
try:
new_data = data.astype("int64")
- except (TypeError, ValueError, OverflowError):
+ except OverflowError:
+ return data, False
+ except (TypeError, ValueError):
pass
# ignore numbers that are out of range
diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py
index 3d1228d65ac7c..ae4b74fc814da 100644
--- a/pandas/tests/groupby/test_value_counts.py
+++ b/pandas/tests/groupby/test_value_counts.py
@@ -135,9 +135,7 @@ def test_series_groupby_value_counts_with_grouper(utc):
}
).drop([3])
- df["Datetime"] = to_datetime(
- df["Timestamp"].apply(lambda t: str(t)), utc=utc, unit="s"
- )
+ df["Datetime"] = to_datetime(df["Timestamp"], utc=utc, unit="s")
dfg = df.groupby(Grouper(freq="1D", key="Datetime"))
# have to sort on index because of unstable sort on values xref GH9212
@@ -1010,9 +1008,7 @@ def test_value_counts_time_grouper(utc):
}
).drop([3])
- df["Datetime"] = to_datetime(
- df["Timestamp"].apply(lambda t: str(t)), utc=utc, unit="s"
- )
+ df["Datetime"] = to_datetime(df["Timestamp"], utc=utc, unit="s")
gb = df.groupby(Grouper(freq="1D", key="Datetime"))
result = gb.value_counts()
dates = to_datetime(
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index dfbe78e53de40..a1217b268613a 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -1723,11 +1723,13 @@ def test_to_datetime_month_or_year_unit_non_round_float(self, cache, unit):
# GH#50301
# Match Timestamp behavior in disallowing non-round floats with
# Y or M unit
+ warn_msg = "strings will be parsed as datetime strings"
msg = f"Conversion of non-round float with unit={unit} is ambiguous"
with pytest.raises(ValueError, match=msg):
to_datetime([1.5], unit=unit, errors="raise")
with pytest.raises(ValueError, match=msg):
- to_datetime(["1.5"], unit=unit, errors="raise")
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ to_datetime(["1.5"], unit=unit, errors="raise")
# with errors="ignore" we also end up raising within the Timestamp
# constructor; this may not be ideal
@@ -1742,7 +1744,8 @@ def test_to_datetime_month_or_year_unit_non_round_float(self, cache, unit):
expected = Index([NaT], dtype="M8[ns]")
tm.assert_index_equal(res, expected)
- res = to_datetime(["1.5"], unit=unit, errors="coerce")
+ with tm.assert_produces_warning(FutureWarning, match=warn_msg):
+ res = to_datetime(["1.5"], unit=unit, errors="coerce")
tm.assert_index_equal(res, expected)
# round floats are OK
| - [x] closes #50735 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50909 | 2023-01-20T21:45:36Z | 2023-01-26T11:04:16Z | 2023-01-26T11:04:16Z | 2023-01-26T16:07:19Z |
DEPR: Remove (Int|UInt|Float)64Index | diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index da4a73ce1c80f..de28342c0d526 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -75,8 +75,8 @@ def assert_almost_equal(
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
- then `RangeIndex` and `Int64Index` are also considered equivalent
- when doing type checking.
+ then `RangeIndex` and `NumericIndex` with int64 dtype are also considered
+ equivalent when doing type checking.
rtol : float, default 1e-5
Relative tolerance.
@@ -197,7 +197,7 @@ def assert_index_equal(
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
- Int64Index as well.
+ NumericIndex with an int64 dtype as well.
check_names : bool, default True
Whether to check the names attribute.
check_exact : bool, default True
@@ -511,7 +511,7 @@ def assert_interval_array_equal(
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
- Int64Index as well.
+ NumericIndex with an int64 dtype as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index b3eb5b1bfdcde..2fbd3a6cdb046 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -94,8 +94,8 @@ def load_reduce(self):
("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
("pandas.indexes.numeric", "Int64Index"): (
- "pandas.core.indexes.numeric",
- "Int64Index",
+ "pandas.core.indexes.base",
+ "Index", # updated in 50775
),
("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
@@ -119,7 +119,7 @@ def load_reduce(self):
"TimedeltaIndex",
),
("pandas.indexes.numeric", "Float64Index"): (
- "pandas.core.indexes.numeric",
+ "pandas.core.indexes.base",
"Index", # updated in 50775
),
# 50775, remove Int64Index, UInt64Index & Float64Index from codabase
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 1a410f87c8552..2e9638036eec5 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -520,7 +520,7 @@ def multiindex_year_month_day_dataframe_random_data():
"""
tdf = tm.makeTimeDataFrame(100)
ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
- # use Int64Index, to make sure things work
+ # use int64 Index, to make sure things work
ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels])
ymd.index.set_names(["year", "month", "day"], inplace=True)
return ymd
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index aa9d1c8152019..a7fa77d1ff0ee 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1305,7 +1305,7 @@ def __init__(self, obj: DataFrame, n: int, keep: str, columns: IndexLabel) -> No
def compute(self, method: str) -> DataFrame:
- from pandas.core.api import Int64Index
+ from pandas.core.api import NumericIndex
n = self.n
frame = self.obj
@@ -1333,7 +1333,7 @@ def get_indexer(current_indexer, other_indexer):
original_index = frame.index
cur_frame = frame = frame.reset_index(drop=True)
cur_n = n
- indexer = Int64Index([])
+ indexer = NumericIndex([], dtype=np.int64)
for i, column in enumerate(columns):
# For each column we apply method to cur_frame[column].
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 3d2547fcea230..0dffa681acfc1 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -51,16 +51,13 @@
from pandas.core.indexes.api import (
CategoricalIndex,
DatetimeIndex,
- Float64Index,
Index,
- Int64Index,
IntervalIndex,
MultiIndex,
NumericIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
- UInt64Index,
)
from pandas.core.indexes.datetimes import (
bdate_range,
@@ -101,14 +98,12 @@
"Flags",
"Float32Dtype",
"Float64Dtype",
- "Float64Index",
"Grouper",
"Index",
"IndexSlice",
"Int16Dtype",
"Int32Dtype",
"Int64Dtype",
- "Int64Index",
"Int8Dtype",
"Interval",
"IntervalDtype",
@@ -141,7 +136,6 @@
"UInt16Dtype",
"UInt32Dtype",
"UInt64Dtype",
- "UInt64Index",
"UInt8Dtype",
"unique",
"value_counts",
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 1a2b9728f80a1..50203003a037a 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -726,10 +726,10 @@ def total_seconds(self) -> npt.NDArray[np.float64]:
Returns
-------
- ndarray, Float64Index or Series
+ ndarray, Index or Series
When the calling object is a TimedeltaArray, the return type
is ndarray. When the calling object is a TimedeltaIndex,
- the return type is a Float64Index. When the calling object
+ the return type is an Index with a float64 dtype. When the calling object
is a Series, the return type is Series of type `float64` whose
index is the same as the original.
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3b122eaa814e5..9efc07628cccd 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -10870,11 +10870,7 @@ def quantile(
f"Invalid method: {method}. Method must be in {valid_method}."
)
if method == "single":
- # error: Argument "qs" to "quantile" of "BlockManager" has incompatible type
- # "Index"; expected "Float64Index"
- res = data._mgr.quantile(
- qs=q, axis=1, interpolation=interpolation # type: ignore[arg-type]
- )
+ res = data._mgr.quantile(qs=q, axis=1, interpolation=interpolation)
elif method == "table":
valid_interpolation = {"nearest", "lower", "higher"}
if interpolation not in valid_interpolation:
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index e0e5c15f6adfc..ffc886711932b 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -880,7 +880,7 @@ def is_in_axis(key) -> bool:
try:
items.get_loc(key)
except (KeyError, TypeError, InvalidIndexError):
- # TypeError shows up here if we pass e.g. Int64Index
+ # TypeError shows up here if we pass e.g. an Index
return False
return True
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index c562eaffd241d..08191363bfc93 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -26,12 +26,7 @@
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.interval import IntervalIndex
from pandas.core.indexes.multi import MultiIndex
-from pandas.core.indexes.numeric import (
- Float64Index,
- Int64Index,
- NumericIndex,
- UInt64Index,
-)
+from pandas.core.indexes.numeric import NumericIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.range import RangeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
@@ -52,12 +47,9 @@
"Index",
"MultiIndex",
"NumericIndex",
- "Float64Index",
- "Int64Index",
"CategoricalIndex",
"IntervalIndex",
"RangeIndex",
- "UInt64Index",
"InvalidIndexError",
"TimedeltaIndex",
"PeriodIndex",
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 4d2e4758817be..fc2c51166a737 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -296,9 +296,6 @@ class Index(IndexOpsMixin, PandasObject):
TimedeltaIndex : Index of timedelta64 data.
PeriodIndex : Index of Period data.
NumericIndex : Index of numpy int/uint/float data.
- Int64Index : Index of purely int64 labels (deprecated).
- UInt64Index : Index of purely uint64 labels (deprecated).
- Float64Index : Index of purely float64 labels (deprecated).
Notes
-----
@@ -498,7 +495,7 @@ def __new__(
klass = cls._dtype_to_subclass(arr.dtype)
- # _ensure_array _may_ be unnecessary once Int64Index etc are gone
+ # _ensure_array _may_ be unnecessary once NumericIndex etc are gone
arr = klass._ensure_array(arr, arr.dtype, copy=False)
return klass._simple_new(arr, name)
@@ -1026,7 +1023,7 @@ def take(
taken = values.take(
indices, allow_fill=allow_fill, fill_value=self._na_value
)
- # _constructor so RangeIndex->Int64Index
+ # _constructor so RangeIndex-> Index with an int64 dtype
return self._constructor._simple_new(taken, name=self.name)
@final
@@ -1097,7 +1094,7 @@ def repeat(self, repeats, axis=None):
nv.validate_repeat((), {"axis": axis})
res_values = self._values.repeat(repeats)
- # _constructor so RangeIndex->Int64Index
+ # _constructor so RangeIndex-> Index with an int64 dtype
return self._constructor._simple_new(res_values, name=self.name)
# --------------------------------------------------------------------
@@ -6228,7 +6225,7 @@ def _maybe_cast_slice_bound(self, label, side: str_t):
"""
# We are a plain index here (sub-class override this method if they
- # wish to have special treatment for floats/ints, e.g. Float64Index and
+ # wish to have special treatment for floats/ints, e.g. NumericIndex and
# datetimelike Indexes
# Special case numeric EA Indexes, since they are not handled by NumericIndex
@@ -6442,7 +6439,7 @@ def delete(self: _IndexT, loc) -> _IndexT:
else:
res_values = values.delete(loc)
- # _constructor so RangeIndex->Int64Index
+ # _constructor so RangeIndex-> Index with an int64 dtype
return self._constructor._simple_new(res_values, name=self.name)
def insert(self, loc: int, item) -> Index:
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index bdaaeb20b3508..fc40ae4a99be0 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -63,7 +63,7 @@
if TYPE_CHECKING:
from pandas.core.api import (
DataFrame,
- Float64Index,
+ NumericIndex,
PeriodIndex,
)
@@ -284,11 +284,11 @@ def to_period(self, freq=None) -> PeriodIndex:
return PeriodIndex._simple_new(arr, name=self.name)
@doc(DatetimeArray.to_julian_date)
- def to_julian_date(self) -> Float64Index:
- from pandas.core.indexes.api import Float64Index
+ def to_julian_date(self) -> NumericIndex:
+ from pandas.core.indexes.api import NumericIndex
arr = self._data.to_julian_date()
- return Float64Index._simple_new(arr, name=self.name)
+ return NumericIndex._simple_new(arr, name=self.name)
@doc(DatetimeArray.isocalendar)
def isocalendar(self) -> DataFrame:
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index c6bd7b8aae980..3b0b5d1d55a88 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -520,7 +520,7 @@ def _maybe_convert_i8(self, key):
-------
scalar or list-like
The original key if no conversion occurred, int if converted scalar,
- Int64Index if converted list-like.
+ Index with an int64 dtype if converted list-like.
"""
if is_list_like(key):
key = ensure_index(key)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index d7fba354ceead..14240f1d19472 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1642,7 +1642,7 @@ def get_level_values(self, level):
level_1 int64
dtype: object
>>> pd.MultiIndex.from_arrays([[1, None, 2], [3, 4, 5]]).get_level_values(0)
- Float64Index([1.0, nan, 2.0], dtype='float64')
+ NumericIndex([1.0, nan, 2.0], dtype='float64')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 3ea7b30f7e9f1..fab741ec1ba18 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -1,16 +1,10 @@
from __future__ import annotations
-from typing import (
- Callable,
- Hashable,
-)
+from typing import Callable
import numpy as np
-from pandas._libs import (
- index as libindex,
- lib,
-)
+from pandas._libs import index as libindex
from pandas._typing import (
Dtype,
npt,
@@ -26,8 +20,6 @@
is_integer_dtype,
is_numeric_dtype,
is_scalar,
- is_signed_integer_dtype,
- is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
@@ -68,9 +60,6 @@ class NumericIndex(Index):
See Also
--------
Index : The base pandas Index type.
- Int64Index : Index of purely int64 labels (deprecated).
- UInt64Index : Index of purely uint64 labels (deprecated).
- Float64Index : Index of purely float64 labels (deprecated).
Notes
-----
@@ -146,18 +135,12 @@ def _ensure_array(cls, data, dtype, copy: bool):
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
- orig = data
if isinstance(data, (list, tuple)):
if len(data):
data = sanitize_array(data, index=None)
else:
data = np.array([], dtype=np.int64)
- if dtype is None and data.dtype.kind == "f":
- if cls is UInt64Index and (data >= 0).all():
- # https://github.com/numpy/numpy/issues/19146
- data = np.asarray(orig, dtype=np.uint64)
-
dtype = cls._ensure_dtype(dtype)
if copy or not is_dtype_equal(data.dtype, dtype):
@@ -199,8 +182,6 @@ def _validate_dtype(cls, dtype: Dtype | None) -> None:
@classmethod
def _ensure_dtype(cls, dtype: Dtype | None) -> np.dtype | None:
"""
- Ensure int64 dtype for Int64Index etc. but allow int32 etc. for NumericIndex.
-
Assumes dtype has already been validated.
"""
if dtype is None:
@@ -243,14 +224,6 @@ def _maybe_cast_slice_bound(self, label, side: str):
# ----------------------------------------------------------------
- @doc(Index._shallow_copy)
- def _shallow_copy(self, values, name: Hashable = lib.no_default):
- if not self._can_hold_na and values.dtype.kind == "f":
- name = self._name if name is lib.no_default else name
- # Ensure we are not returning an Int64Index with float data:
- return Float64Index._simple_new(values, name=name)
- return super()._shallow_copy(values=values, name=name)
-
def _convert_tolerance(self, tolerance, target):
tolerance = super()._convert_tolerance(tolerance, target)
@@ -308,108 +281,3 @@ def _format_native_types(
quoting=quoting,
**kwargs,
)
-
-
-_num_index_shared_docs = {}
-
-
-_num_index_shared_docs[
- "class_descr"
-] = """
- Immutable sequence used for indexing and alignment.
-
- .. deprecated:: 1.4.0
- In pandas v2.0 %(klass)s will be removed and :class:`NumericIndex` used instead.
- %(klass)s will remain fully functional for the duration of pandas 1.x.
-
- The basic object storing axis labels for all pandas objects.
- %(klass)s is a special case of `Index` with purely %(ltype)s labels. %(extra)s.
-
- Parameters
- ----------
- data : array-like (1-dimensional)
- dtype : NumPy dtype (default: %(dtype)s)
- copy : bool
- Make a copy of input ndarray.
- name : object
- Name to be stored in the index.
-
- Attributes
- ----------
- None
-
- Methods
- -------
- None
-
- See Also
- --------
- Index : The base pandas Index type.
- NumericIndex : Index of numpy int/uint/float data.
-
- Notes
- -----
- An Index instance can **only** contain hashable objects.
-"""
-
-
-class IntegerIndex(NumericIndex):
- """
- This is an abstract class for Int64Index, UInt64Index.
- """
-
- _is_backward_compat_public_numeric_index: bool = False
-
-
-class Int64Index(IntegerIndex):
- _index_descr_args = {
- "klass": "Int64Index",
- "ltype": "integer",
- "dtype": "int64",
- "extra": "",
- }
- __doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
-
- _typ = "int64index"
- _default_dtype = np.dtype(np.int64)
- _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
-
- @property
- def _engine_type(self) -> type[libindex.Int64Engine]:
- return libindex.Int64Engine
-
-
-class UInt64Index(IntegerIndex):
- _index_descr_args = {
- "klass": "UInt64Index",
- "ltype": "unsigned integer",
- "dtype": "uint64",
- "extra": "",
- }
- __doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
-
- _typ = "uint64index"
- _default_dtype = np.dtype(np.uint64)
- _dtype_validation_metadata = (is_unsigned_integer_dtype, "unsigned integer")
-
- @property
- def _engine_type(self) -> type[libindex.UInt64Engine]:
- return libindex.UInt64Engine
-
-
-class Float64Index(NumericIndex):
- _index_descr_args = {
- "klass": "Float64Index",
- "dtype": "float64",
- "ltype": "float",
- "extra": "",
- }
- __doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
-
- _typ = "float64index"
- _default_dtype = np.dtype(np.float64)
- _dtype_validation_metadata = (is_float_dtype, "float")
-
- @property
- def _engine_type(self) -> type[libindex.Float64Engine]:
- return libindex.Float64Engine
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 877bb2844e8c9..b32f0954a7c30 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -45,7 +45,6 @@
Index,
)
from pandas.core.indexes.extension import inherit_names
-from pandas.core.indexes.numeric import Int64Index
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})
@@ -183,18 +182,18 @@ def to_timestamp(self, freq=None, how: str = "start") -> DatetimeIndex:
@property
@doc(PeriodArray.hour.fget)
- def hour(self) -> Int64Index:
- return Int64Index(self._data.hour, name=self.name)
+ def hour(self) -> Index:
+ return Index(self._data.hour, name=self.name)
@property
@doc(PeriodArray.minute.fget)
- def minute(self) -> Int64Index:
- return Int64Index(self._data.minute, name=self.name)
+ def minute(self) -> Index:
+ return Index(self._data.minute, name=self.name)
@property
@doc(PeriodArray.second.fget)
- def second(self) -> Int64Index:
- return Int64Index(self._data.second, name=self.name)
+ def second(self) -> Index:
+ return Index(self._data.second, name=self.name)
# ------------------------------------------------------------------------
# Index Constructors
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 636e376197ef1..a7b19e3180fff 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2081,7 +2081,7 @@ def _setitem_with_indexer_missing(self, indexer, value):
new_index = index.insert(len(index), indexer)
# we have a coerced indexer, e.g. a float
- # that matches in an Int64Index, so
+ # that matches in an int64 Index, so
# we will not create a duplicate index, rather
# index to that element
# e.g. 0.0 -> 0
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index b8ef925362e7b..82316806d3d47 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -4,7 +4,6 @@
from __future__ import annotations
from typing import (
- TYPE_CHECKING,
Any,
Callable,
Hashable,
@@ -94,10 +93,6 @@
to_native_types,
)
-if TYPE_CHECKING:
- from pandas.core.api import Float64Index
-
-
T = TypeVar("T", bound="BaseArrayManager")
@@ -1013,7 +1008,7 @@ def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager:
def quantile(
self,
*,
- qs: Float64Index,
+ qs: Index, # with dtype float64
axis: AxisInt = 0,
transposed: bool = False,
interpolation: QuantileInterpolation = "linear",
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 4bb4882574228..00ab9d02cee00 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1288,8 +1288,8 @@ def quantile(
Parameters
----------
- qs : Float64Index
- List of the quantiles to be computed.
+ qs : Index
+ The quantiles to be computed in float64.
interpolation : str, default 'linear'
Type of interpolation.
axis : int, default 0
| Actually removes `Int64Index`, `UInt64Index` & `Float64Index`. Also does some cleanups that previous deprecation PRs didn't catch.
This PR formally achieves the goals set out in #42717. In real terms there are a few outstanding items, which still need to be addressed:
* Move `NumericIndex` into `Index`
* Update docs and write the `whatsnew` entry for this change
* Make the tests in `pandas/tests/arithmetic` cover 8/16/32 bit dtypes
* a few minor issues
So I'd like to kep #42717 open until those are resolved.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50908 | 2023-01-20T21:45:02Z | 2023-01-22T18:53:42Z | 2023-01-22T18:53:42Z | 2023-01-22T18:53:50Z |
BUG: JSON serialization with orient split fails roundtrip with MultiIndex | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 0ef5636a97d40..082963fe806ee 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -907,6 +907,7 @@ I/O
- Fixed memory leak which stemmed from the initialization of the internal JSON module (:issue:`49222`)
- Fixed issue where :func:`json_normalize` would incorrectly remove leading characters from column names that matched the ``sep`` argument (:issue:`49861`)
- Bug in :meth:`DataFrame.to_json` where it would segfault when failing to encode a string (:issue:`50307`)
+- Bug in :meth:`DataFrame.to_json` where it would produce duplicate column names for orient=split (:issue:`50456`)
Period
^^^^^^
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index f2780d5fa6832..72f59ca8e753d 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -182,6 +182,20 @@ def to_json(
indent=indent,
).write()
+ if orient == "split" and isinstance(obj, DataFrame):
+ if isinstance(obj.columns, MultiIndex):
+ lst = []
+ # backwards of multindex.fromArray
+ for i in range(len(obj.columns[0])):
+ sub = []
+ for j in range(len(obj.columns)):
+ sub.append(obj.columns[j][i])
+ lst.append(sub)
+ newS = loads(s)
+ # fixes columns to original columns
+ newS["columns"] = lst
+ s = dumps(newS)
+
if lines:
s = convert_to_line_delimits(s)
diff --git a/pandas/tests/io/json/Untitled-1.ipynb b/pandas/tests/io/json/Untitled-1.ipynb
new file mode 100644
index 0000000000000..a61008429ceef
--- /dev/null
+++ b/pandas/tests/io/json/Untitled-1.ipynb
@@ -0,0 +1,19 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index aff09a62b0df3..4408ef93ca40b 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1528,13 +1528,6 @@ def test_timedelta_as_label(self, date_format, key):
("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"),
("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"),
# TODO: the below have separate encoding procedures
- pytest.param(
- "split",
- "",
- marks=pytest.mark.xfail(
- reason="Produces JSON but not in a consistent manner"
- ),
- ),
pytest.param(
"table",
"",
diff --git a/pp.py b/pp.py
new file mode 100644
index 0000000000000..d6f29d682561b
--- /dev/null
+++ b/pp.py
@@ -0,0 +1,3 @@
+import pandas
+print(pandas.__version__)
+!pip install pytest
\ No newline at end of file
| - Closes #50456
- xFail for mark split deprecated.
- All [code checks passed].
- Added an entry in the latest `doc/source/whatsnew/v2.0.0.rst`
| https://api.github.com/repos/pandas-dev/pandas/pulls/50904 | 2023-01-20T20:27:05Z | 2023-02-24T20:05:01Z | null | 2023-02-24T20:05:02Z |
ENH: Use circular weakref to delay copy in setitem | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index eb0eb34dbefc4..012b6577690ba 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -31,6 +31,7 @@
overload,
)
import warnings
+import weakref
import numpy as np
from numpy import ma
@@ -4043,12 +4044,12 @@ def _iset_item_mgr(
self._mgr.iset(loc, value, inplace=inplace)
self._clear_item_cache()
- def _set_item_mgr(self, key, value: ArrayLike) -> None:
+ def _set_item_mgr(self, key, value: ArrayLike, refs=[]) -> None:
try:
loc = self._info_axis.get_loc(key)
except KeyError:
# This item wasn't present, just insert at end
- self._mgr.insert(len(self._info_axis), key, value)
+ self._mgr.insert(len(self._info_axis), key, value, ref=refs)
else:
self._iset_item_mgr(loc, value)
@@ -4078,7 +4079,17 @@ def _set_item(self, key, value) -> None:
Series/TimeSeries will be conformed to the DataFrames index to
ensure homogeneity.
"""
- value = self._sanitize_column(value)
+ orig_value = None
+ refs = None
+ copy = True
+ if using_copy_on_write() and isinstance(value, Series):
+ refs = []
+ block = value._mgr.blocks[0]
+ refs.append(weakref.ref(block))
+ orig_value = value
+ copy = False
+
+ value = self._sanitize_column(value, copy=copy)
if (
key in self.columns
@@ -4091,7 +4102,19 @@ def _set_item(self, key, value) -> None:
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1)).T
- self._set_item_mgr(key, value)
+ self._set_item_mgr(key, value, refs=refs)
+
+ # Also make a ref back to the DF in the Series so modifying the Series
+ # doesn't change DF (triggers a CoW)
+ # TODO: Make sure the weakref is not dead?
+ if orig_value is not None and not orig_value._mgr.refs:
+ # If the series already has refs (e.g. another DF contains it as a column),
+ # then modifying it will already trigger
+ # a CoW, so we are good
+ loc = self._info_axis.get_loc(key)
+ blkno = self._mgr.blknos[loc]
+ blk = self._mgr.blocks[blkno]
+ orig_value._mgr.refs = [weakref.ref(blk)]
def _set_value(
self, index: IndexLabel, col, value: Scalar, takeable: bool = False
@@ -4795,14 +4818,17 @@ def assign(self, **kwargs) -> DataFrame:
data[k] = com.apply_if_callable(v, data)
return data
- def _sanitize_column(self, value) -> ArrayLike:
+ def _sanitize_column(self, value, copy=False) -> ArrayLike:
"""
Ensures new columns (which go into the BlockManager as new blocks) are
- always copied and converted into an array.
+ converted into an array.
Parameters
----------
value : scalar, Series, or array-like
+ copy : bool, default False
+ Whether to copy new columns. You would want to turn this off if CoW
+ is on, and instead set refs appropriately.
Returns
-------
@@ -4815,11 +4841,12 @@ def _sanitize_column(self, value) -> ArrayLike:
if isinstance(value, DataFrame):
return _reindex_for_setitem(value, self.index)
elif is_dict_like(value):
- return _reindex_for_setitem(Series(value), self.index)
+ return _reindex_for_setitem(Series(value), self.index, copy=copy)
if is_list_like(value):
com.require_length_match(value, self.index)
- return sanitize_array(value, self.index, copy=True, allow_2d=True)
+
+ return sanitize_array(value, self.index, copy=copy, allow_2d=True)
@property
def _series(self):
@@ -11517,11 +11544,16 @@ def _from_nested_dict(data) -> collections.defaultdict:
return new_data
-def _reindex_for_setitem(value: DataFrame | Series, index: Index) -> ArrayLike:
+def _reindex_for_setitem(
+ value: DataFrame | Series, index: Index, copy=True
+) -> ArrayLike:
# reindex if necessary
if value.index.equals(index) or not len(index):
- return value._values.copy()
+ ret_values = value._values
+ if copy:
+ ret_values = ret_values.copy()
+ return ret_values
# GH#4107
try:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 1a0aba0778da5..b5807ffb5c8cd 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1375,7 +1375,7 @@ def column_setitem(
new_mgr = col_mgr.setitem((idx,), value)
self.iset(loc, new_mgr._block.values, inplace=True)
- def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None:
+ def insert(self, loc: int, item: Hashable, value: ArrayLike, ref=None) -> None:
"""
Insert item at selected position.
@@ -1384,6 +1384,8 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None:
loc : int
item : hashable
value : np.ndarray or ExtensionArray
+ ref: weakref.ref or None
+ A weakref pointing to the Block that owns the value or None
"""
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
@@ -1410,9 +1412,13 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None:
self.axes[0] = new_axis
self.blocks += (block,)
- # TODO(CoW) do we always "own" the passed `value`?
- if self.refs is not None:
- self.refs += [None]
+
+ if ref:
+ if self.refs is not None:
+ self.refs.append(ref)
+ else:
+ self.refs = [None] * (len(self.blocks) - 1)
+ self.refs += ref
self._known_consolidated = False
diff --git a/pandas/tests/copy_view/test_setitem.py b/pandas/tests/copy_view/test_setitem.py
index 9e0d350dde0de..a85089c637fe5 100644
--- a/pandas/tests/copy_view/test_setitem.py
+++ b/pandas/tests/copy_view/test_setitem.py
@@ -29,14 +29,13 @@ def test_set_column_with_array():
def test_set_column_with_series(using_copy_on_write):
# Case: setting a series as a new column (df[col] = s) copies that data
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
- ser = Series([1, 2, 3])
+ ser = Series([1, 2, 3], name="c")
+ ser_orig = ser.copy()
df["c"] = ser
if using_copy_on_write:
- # TODO(CoW) with CoW we can delay the copy
- # assert np.shares_memory(df["c"].values, ser.values)
- assert not np.shares_memory(df["c"].values, ser.values)
+ assert np.shares_memory(df["c"].values, ser.values)
else:
# the series data is copied
assert not np.shares_memory(df["c"].values, ser.values)
@@ -44,7 +43,15 @@ def test_set_column_with_series(using_copy_on_write):
# and modifying the series does not modify the DataFrame
ser.iloc[0] = 0
assert ser.iloc[0] == 0
- tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c"))
+ tm.assert_series_equal(df["c"], ser_orig)
+
+ # Update ser_orig now, so we can check if ser changed
+ ser_orig.iloc[0] = 0
+
+ # and modifying the DataFrame doesn't modify the Series
+ df.loc[0, "c"] = 10
+ assert df.loc[0, "c"] == 10
+ tm.assert_series_equal(ser, ser_orig)
def test_set_column_with_index(using_copy_on_write):
@@ -79,12 +86,12 @@ def test_set_columns_with_dataframe(using_copy_on_write):
df[["c", "d"]] = df2
if using_copy_on_write:
- # TODO(CoW) with CoW we can delay the copy
- # assert np.shares_memory(df["c"].values, df2["c"].values)
- assert not np.shares_memory(df["c"].values, df2["c"].values)
+ assert np.shares_memory(df["c"].values, df2["c"].values)
+ assert np.shares_memory(df["d"].values, df2["d"].values)
else:
# the data is copied
assert not np.shares_memory(df["c"].values, df2["c"].values)
+ assert not np.shares_memory(df["d"].values, df2["d"].values)
# and modifying the set DataFrame does not modify the original DataFrame
df2.iloc[0, 0] = 0
| - [ ] xref #48998 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This PR uses a circular weakref between the added block in the DataFrame and the original block from the Series, when doing setitem in a DataFrame with a Series (haven't fixed other cases yet, wanted to get feedback on whether I'm on the right track).
With this way, whoever modifies the data first will have to copy the data, and we can delay the copy in ``__setitem__``.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50902 | 2023-01-20T20:10:28Z | 2023-02-23T01:29:20Z | null | 2023-03-14T12:16:03Z |
ENH: pd.NA comparison with time, date, timedelta | diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index fc94d221a63b9..e6516b004a973 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -3,6 +3,11 @@ import numbers
from sys import maxsize
cimport cython
+from cpython.datetime cimport (
+ date,
+ time,
+ timedelta,
+)
from cython cimport Py_ssize_t
import numpy as np
@@ -307,6 +312,7 @@ def is_numeric_na(values: ndarray) -> ndarray:
def _create_binary_propagating_op(name, is_divmod=False):
+ is_cmp = name.strip("_") in ["eq", "ne", "le", "lt", "ge", "gt"]
def method(self, other):
if (other is C_NA or isinstance(other, (str, bytes))
@@ -329,6 +335,9 @@ def _create_binary_propagating_op(name, is_divmod=False):
else:
return out
+ elif is_cmp and isinstance(other, (date, time, timedelta)):
+ return NA
+
return NotImplemented
method.__name__ = name
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 2df410dff2b00..54d41fa9d972a 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -4,6 +4,8 @@
import numpy as np
import pytest
+from pandas._typing import Dtype
+
from pandas.core.dtypes.common import is_bool_dtype
from pandas.core.dtypes.missing import na_value_for_dtype
@@ -260,6 +262,9 @@ def test_fillna_length_mismatch(self, data_missing):
with pytest.raises(ValueError, match=msg):
data_missing.fillna(data_missing.take([1]))
+ # Subclasses can override if we expect e.g Sparse[bool], boolean, pyarrow[bool]
+ _combine_le_expected_dtype: Dtype = np.dtype(bool)
+
def test_combine_le(self, data_repeated):
# GH 20825
# Test that combine works when doing a <= (le) comparison
@@ -268,13 +273,17 @@ def test_combine_le(self, data_repeated):
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 <= x2)
expected = pd.Series(
- [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))]
+ [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],
+ dtype=self._combine_le_expected_dtype,
)
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 <= x2)
- expected = pd.Series([a <= val for a in list(orig_data1)])
+ expected = pd.Series(
+ [a <= val for a in list(orig_data1)],
+ dtype=self._combine_le_expected_dtype,
+ )
self.assert_series_equal(result, expected)
def test_combine_add(self, data_repeated):
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 2467471e3643e..eef77ceabb6fe 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -972,11 +972,7 @@ def test_factorize(self, data_for_grouping, request):
)
super().test_factorize(data_for_grouping)
- @pytest.mark.xfail(
- reason="result dtype pyarrow[bool] better than expected dtype object"
- )
- def test_combine_le(self, data_repeated):
- super().test_combine_le(data_repeated)
+ _combine_le_expected_dtype = "bool[pyarrow]"
def test_combine_add(self, data_repeated, request):
pa_dtype = next(data_repeated(1)).dtype.pyarrow_dtype
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index b611701e4e429..8c8cbfa5200b1 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -176,6 +176,8 @@ class TestReshaping(base.BaseReshapingTests):
class TestMethods(base.BaseMethodsTests):
+ _combine_le_expected_dtype = "boolean"
+
def test_factorize(self, data_for_grouping):
# override because we only have 2 unique values
labels, uniques = pd.factorize(data_for_grouping, use_na_sentinel=True)
@@ -185,23 +187,6 @@ def test_factorize(self, data_for_grouping):
tm.assert_numpy_array_equal(labels, expected_labels)
self.assert_extension_array_equal(uniques, expected_uniques)
- def test_combine_le(self, data_repeated):
- # override because expected needs to be boolean instead of bool dtype
- orig_data1, orig_data2 = data_repeated(2)
- s1 = pd.Series(orig_data1)
- s2 = pd.Series(orig_data2)
- result = s1.combine(s2, lambda x1, x2: x1 <= x2)
- expected = pd.Series(
- [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],
- dtype="boolean",
- )
- self.assert_series_equal(result, expected)
-
- val = s1.iloc[0]
- result = s1.combine(val, lambda x1, x2: x1 <= x2)
- expected = pd.Series([a <= val for a in list(orig_data1)], dtype="boolean")
- self.assert_series_equal(result, expected)
-
def test_searchsorted(self, data_for_sorting, as_series):
# override because we only have 2 unique values
data_for_sorting = pd.array([True, False], dtype="boolean")
diff --git a/pandas/tests/extension/test_floating.py b/pandas/tests/extension/test_floating.py
index f67f7dc56d26f..60c78b46a4832 100644
--- a/pandas/tests/extension/test_floating.py
+++ b/pandas/tests/extension/test_floating.py
@@ -173,7 +173,7 @@ class TestMissing(base.BaseMissingTests):
class TestMethods(base.BaseMethodsTests):
- pass
+ _combine_le_expected_dtype = object # TODO: can we make this boolean?
class TestCasting(base.BaseCastingTests):
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index 788a0bf46afc5..936764c3627d0 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -201,7 +201,7 @@ class TestMissing(base.BaseMissingTests):
class TestMethods(base.BaseMethodsTests):
- pass
+ _combine_le_expected_dtype = object # TODO: can we make this boolean?
class TestCasting(base.BaseCastingTests):
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 9c5b3426246a8..745911871694c 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -270,28 +270,7 @@ def test_fillna_frame(self, data_missing):
class TestMethods(BaseSparseTests, base.BaseMethodsTests):
- def test_combine_le(self, data_repeated):
- # We return a Series[SparseArray].__le__ returns a
- # Series[Sparse[bool]]
- # rather than Series[bool]
- orig_data1, orig_data2 = data_repeated(2)
- s1 = pd.Series(orig_data1)
- s2 = pd.Series(orig_data2)
- result = s1.combine(s2, lambda x1, x2: x1 <= x2)
- expected = pd.Series(
- SparseArray(
- [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],
- fill_value=False,
- )
- )
- self.assert_series_equal(result, expected)
-
- val = s1.iloc[0]
- result = s1.combine(val, lambda x1, x2: x1 <= x2)
- expected = pd.Series(
- SparseArray([a <= val for a in list(orig_data1)], fill_value=False)
- )
- self.assert_series_equal(result, expected)
+ _combine_le_expected_dtype = "Sparse[bool]"
def test_fillna_copy_frame(self, data_missing):
arr = data_missing.take([1, 1])
diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py
index a77316cbc0ea6..bdeb11dbb8f19 100644
--- a/pandas/tests/scalar/test_na_scalar.py
+++ b/pandas/tests/scalar/test_na_scalar.py
@@ -1,3 +1,8 @@
+from datetime import (
+ date,
+ time,
+ timedelta,
+)
import pickle
import numpy as np
@@ -67,7 +72,21 @@ def test_arithmetic_ops(all_arithmetic_functions, other):
@pytest.mark.parametrize(
- "other", [NA, 1, 1.0, "a", b"a", np.int64(1), np.nan, np.bool_(True)]
+ "other",
+ [
+ NA,
+ 1,
+ 1.0,
+ "a",
+ b"a",
+ np.int64(1),
+ np.nan,
+ np.bool_(True),
+ time(0),
+ date(1, 2, 3),
+ timedelta(1),
+ pd.NaT,
+ ],
)
def test_comparison_ops(comparison_op, other):
assert comparison_op(NA, other) is NA
| - [x] closes #34104
Fixes a handful of pyarrow xfails | https://api.github.com/repos/pandas-dev/pandas/pulls/50901 | 2023-01-20T18:38:26Z | 2023-01-25T17:20:34Z | 2023-01-25T17:20:34Z | 2023-01-25T17:42:08Z |
CLN: Refactor block splitting into its own method | diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 1a0aba0778da5..71d8b20f18457 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1220,13 +1220,7 @@ def value_getitem(placement):
if inplace and blk.should_store(value):
# Updating inplace -> check if we need to do Copy-on-Write
if using_copy_on_write() and not self._has_no_reference_block(blkno_l):
- nbs_tup = tuple(blk.delete(blk_locs))
- first_nb = new_block_2d(
- value_getitem(val_locs), BlockPlacement(blk.mgr_locs[blk_locs])
- )
- if self.refs is not None:
- self.refs.extend([self.refs[blkno_l]] * len(nbs_tup))
- self._clear_reference_block(blkno_l)
+ self._iset_split_block(blkno_l, blk_locs, value_getitem(val_locs))
else:
blk.set_inplace(blk_locs, value_getitem(val_locs))
continue
@@ -1239,24 +1233,8 @@ def value_getitem(placement):
removed_blknos.append(blkno_l)
continue
else:
- nbs = blk.delete(blk_locs)
- # Add first block where old block was and remaining blocks at
- # the end to avoid updating all block numbers
- first_nb = nbs[0]
- nbs_tup = tuple(nbs[1:])
- nr_blocks = len(self.blocks)
- blocks_tup = (
- self.blocks[:blkno_l]
- + (first_nb,)
- + self.blocks[blkno_l + 1 :]
- + nbs_tup
- )
- self.blocks = blocks_tup
- self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb))
-
- for i, nb in enumerate(nbs_tup):
- self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb))
- self._blknos[nb.mgr_locs.indexer] = i + nr_blocks
+ # Defer setting the new values to enable consolidation
+ self._iset_split_block(blkno_l, blk_locs)
if len(removed_blknos):
# Remove blocks & update blknos and refs accordingly
@@ -1320,6 +1298,57 @@ def value_getitem(placement):
# Newly created block's dtype may already be present.
self._known_consolidated = False
+ def _iset_split_block(
+ self, blkno_l: int, blk_locs: np.ndarray, value: ArrayLike | None = None
+ ) -> None:
+ """Removes columns from a block by splitting the block.
+
+ Avoids copying the whole block through slicing and updates the manager
+ after determinint the new block structure. Optionally adds a new block,
+ otherwise has to be done by the caller.
+
+ Parameters
+ ----------
+ blkno_l: The block number to operate on, relevant for updating the manager
+ blk_locs: The locations of our block that should be deleted.
+ value: The value to set as a replacement.
+ """
+ blk = self.blocks[blkno_l]
+
+ if self._blklocs is None:
+ self._rebuild_blknos_and_blklocs()
+
+ nbs_tup = tuple(blk.delete(blk_locs))
+ if value is not None:
+ # error: No overload variant of "__getitem__" of "BlockPlacement" matches
+ # argument type "ndarray[Any, Any]" [call-overload]
+ first_nb = new_block_2d(
+ value,
+ BlockPlacement(blk.mgr_locs[blk_locs]), # type: ignore[call-overload]
+ )
+ else:
+ first_nb = nbs_tup[0]
+ nbs_tup = tuple(nbs_tup[1:])
+
+ if self.refs is not None:
+ self.refs.extend([self.refs[blkno_l]] * len(nbs_tup))
+
+ if value is not None:
+ # Only clear if we set new values
+ self._clear_reference_block(blkno_l)
+
+ nr_blocks = len(self.blocks)
+ blocks_tup = (
+ self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup
+ )
+ self.blocks = blocks_tup
+
+ self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb))
+
+ for i, nb in enumerate(nbs_tup):
+ self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb))
+ self._blknos[nb.mgr_locs.indexer] = i + nr_blocks
+
def _iset_single(
self, loc: int, value: ArrayLike, inplace: bool, blkno: int, blk: Block
) -> None:
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index c253c4bd1ca53..d5702a545c0d8 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -9,6 +9,7 @@
import pytest
from pandas._libs.internals import BlockPlacement
+from pandas.compat import IS64
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_scalar
@@ -883,6 +884,30 @@ def test_validate_bool_args(self, value):
with pytest.raises(ValueError, match=msg):
bm1.replace_list([1], [2], inplace=value)
+ def test_iset_split_block(self):
+ bm = create_mgr("a,b,c: i8; d: f8")
+ bm._iset_split_block(0, np.array([0]))
+ tm.assert_numpy_array_equal(
+ bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32")
+ )
+ # First indexer currently does not have a block associated with it in case
+ tm.assert_numpy_array_equal(
+ bm.blknos, np.array([0, 0, 0, 1], dtype="int64" if IS64 else "int32")
+ )
+ assert len(bm.blocks) == 2
+
+ def test_iset_split_block_values(self):
+ bm = create_mgr("a,b,c: i8; d: f8")
+ bm._iset_split_block(0, np.array([0]), np.array([list(range(10))]))
+ tm.assert_numpy_array_equal(
+ bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32")
+ )
+ # First indexer currently does not have a block associated with it in case
+ tm.assert_numpy_array_equal(
+ bm.blknos, np.array([0, 2, 2, 1], dtype="int64" if IS64 else "int32")
+ )
+ assert len(bm.blocks) == 3
+
def _as_array(mgr):
if mgr.ndim == 1:
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50900 | 2023-01-20T16:51:41Z | 2023-01-27T23:30:15Z | 2023-01-27T23:30:15Z | 2023-01-27T23:30:19Z |
CI: Trigger preview of the web/docs | diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml
index 73e59134c904a..d2df75777e049 100644
--- a/.github/workflows/docbuild-and-upload.yml
+++ b/.github/workflows/docbuild-and-upload.yml
@@ -89,3 +89,10 @@ jobs:
name: website
path: web/build
retention-days: 14
+
+ - name: Trigger web/doc preview
+ run: curl -X POST https://pandas.pydata.org/preview/submit/$RUN_ID/$PR_ID/
+ env:
+ RUN_ID: ${{ github.run_id }}
+ PR_ID: ${{ github.event.pull_request.number }}
+ if: github.event_name == 'pull_request'
diff --git a/.github/workflows/preview-docs.yml b/.github/workflows/preview-docs.yml
new file mode 100644
index 0000000000000..8f73db283289c
--- /dev/null
+++ b/.github/workflows/preview-docs.yml
@@ -0,0 +1,22 @@
+name: Preview docs
+on:
+ issue_comment:
+ types: created
+
+permissions:
+ contents: read
+
+jobs:
+ preview_docs:
+ permissions:
+ issues: write
+ pull-requests: write
+ runs-on: ubuntu-22.04
+ steps:
+ - if: github.event.comment.body == '/preview'
+ run: |
+ if curl --output /dev/null --silent --head --fail "https://pandas.pydata.org/preview/${{ github.event.issue.number }}/"; then
+ curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -d '{"body": "Website preview of this PR available at: https://pandas.pydata.org/preview/${{ github.event.issue.number }}/"}' https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.issue.number }}/comments
+ else
+ curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -d '{"body": "No preview found for PR #${{ github.event.issue.number }}. Did the docs build complete?"}' https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.issue.number }}/comments
+ fi
| xref #50832
This call tells a simple service in our new server to preview the docs to download the artifact and uncompress it in a directory with the number of the PR in: https://pandas.pydata.org/preview/
Ideally we want to get #50897 merged before this one (otherwise the website can't be navigated clicking at the links).
Still need to check if the artifact is ready when the service is called, I think it's only ready after the CI job is over. So I may need to return from the webservice asynchronously and introduce a delay. | https://api.github.com/repos/pandas-dev/pandas/pulls/50899 | 2023-01-20T16:44:16Z | 2023-02-27T20:35:25Z | 2023-02-27T20:35:25Z | 2023-02-27T20:35:26Z |
WEB: Make all links in the website relative | diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html
index 3cad22bf938b0..dc91a2932e7e4 100644
--- a/web/pandas/_templates/layout.html
+++ b/web/pandas/_templates/layout.html
@@ -12,7 +12,7 @@
<title>pandas - Python Data Analysis Library</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
- <link rel='shortcut icon' type='image/x-icon' id='favicon-tag' href='{{ base_url }}/static/img/favicon.ico'/>
+ <link rel='shortcut icon' type='image/x-icon' id='favicon-tag' href='{{ base_url }}static/img/favicon.ico'/>
<link rel="stylesheet"
href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/css/bootstrap.min.css"
integrity="sha384-+0n0xVW2eSR5OomGNYDnhzAbDsOXxcvSN1TPprVMTNDbiYZCxYbOOl7+AMvyTG2x"
@@ -31,7 +31,7 @@
<span class="navbar-toggler-icon"></span>
</button>
- {% if static.logo %}<a class="navbar-brand" href="{{ base_url }}/"><img alt="" src="{{ base_url }}{{ static.logo }}"/></a>{% endif %}
+ {% if static.logo %}<a class="navbar-brand" href="{{ base_url }}."><img alt="" src="{{ base_url }}{{ static.logo }}"/></a>{% endif %}
<div class="collapse navbar-collapse" id="nav-content">
<ul class="navbar-nav ms-auto">
diff --git a/web/pandas/about/citing.md b/web/pandas/about/citing.md
index b4c7848e5db00..4ce1fdb207865 100644
--- a/web/pandas/about/citing.md
+++ b/web/pandas/about/citing.md
@@ -44,10 +44,10 @@ The official logos of _pandas_ are:
<table class="table logo">
<tr>
<td>
- <img alt="" src="{{ base_url }}/static/img/pandas.svg"/>
+ <img alt="" src="{{ base_url }}static/img/pandas.svg"/>
</td>
<td style="background-color: #150458">
- <img alt="" src="{{ base_url }}/static/img/pandas_white.svg"/>
+ <img alt="" src="{{ base_url }}static/img/pandas_white.svg"/>
</td>
</tr>
</table>
@@ -57,10 +57,10 @@ The official logos of _pandas_ are:
<table class="table logo">
<tr>
<td>
- <img alt="" src="{{ base_url }}/static/img/pandas_secondary.svg"/>
+ <img alt="" src="{{ base_url }}static/img/pandas_secondary.svg"/>
</td>
<td style="background-color: #150458">
- <img alt="" src="{{ base_url }}/static/img/pandas_secondary_white.svg"/>
+ <img alt="" src="{{ base_url }}static/img/pandas_secondary_white.svg"/>
</td>
</tr>
</table>
@@ -70,10 +70,10 @@ The official logos of _pandas_ are:
<table class="table logo">
<tr>
<td>
- <img alt="" src="{{ base_url }}/static/img/pandas_mark.svg"/>
+ <img alt="" src="{{ base_url }}static/img/pandas_mark.svg"/>
</td>
<td style="background-color: #150458">
- <img alt="" src="{{ base_url }}/static/img/pandas_mark_white.svg"/>
+ <img alt="" src="{{ base_url }}static/img/pandas_mark_white.svg"/>
</td>
</tr>
</table>
diff --git a/web/pandas/about/governance.md b/web/pandas/about/governance.md
index 92923db6e6763..0bb61592d7e5d 100644
--- a/web/pandas/about/governance.md
+++ b/web/pandas/about/governance.md
@@ -3,7 +3,7 @@
The official version of this document, along with a list of
individuals and institutions in the roles defined in the governance
section below, is contained in the
-[Project governance](https://pandas.pydata.org/about/governance.html)
+[Project governance]({{ base_url }}about/governance.html)
page of the pandas website.
## The Project
diff --git a/web/pandas/about/roadmap.md b/web/pandas/about/roadmap.md
index 64e730fc42eed..4c12e3e786a32 100644
--- a/web/pandas/about/roadmap.md
+++ b/web/pandas/about/roadmap.md
@@ -17,7 +17,7 @@ tracked on our [issue tracker](https://github.com/pandas-dev/pandas/issues).
The roadmap is defined as a set of major enhancement proposals named PDEPs.
For more information about PDEPs, and how to submit one, please refer to
-[PEDP-1](/pdeps/0001-purpose-and-guidelines.html).
+[PEDP-1]({{ base_url }}pdeps/0001-purpose-and-guidelines.html).
## PDEPs
@@ -27,7 +27,7 @@ For more information about PDEPs, and how to submit one, please refer to
<ul>
{% for pdep in pdeps[pdep_type] %}
- <li><a href="{{ pdep.url }}">{{ pdep.title }}</a></li>
+ <li><a href="{% if not pdep.url.startswith("http") %}{{ base_url }}{% endif %}{{ pdep.url }}">{{ pdep.title }}</a></li>
{% else %}
<li>There are currently no PDEPs with this status</li>
{% endfor %}
diff --git a/web/pandas/about/team.md b/web/pandas/about/team.md
index 5229201ca7d36..a21e8a3142497 100644
--- a/web/pandas/about/team.md
+++ b/web/pandas/about/team.md
@@ -4,7 +4,7 @@
_pandas_ is made with love by more than [2,000 volunteer contributors](https://github.com/pandas-dev/pandas/graphs/contributors).
-If you want to support pandas development, you can find information in the [donations page](../donate.html).
+If you want to support pandas development, you can find information in the [donations page]({{ base_url }}donate.html).
## Active maintainers
@@ -35,7 +35,7 @@ If you want to support pandas development, you can find information in the [dona
> or anyone willing to increase the diversity of our team.
> We have identified visible gaps and obstacles in sustaining diversity and inclusion in the open-source communities and we are proactive in increasing
> the diversity of our team.
-> We have a [code of conduct](../community/coc.html) to ensure a friendly and welcoming environment.
+> We have a [code of conduct]({{ base_url }}community/coc.html) to ensure a friendly and welcoming environment.
> Please send an email to [pandas-code-of-conduct-committee](mailto:pandas-coc@googlegroups.com), if you think we can do a
> better job at achieving this goal.
@@ -43,7 +43,7 @@ If you want to support pandas development, you can find information in the [dona
Wes McKinney is the Benevolent Dictator for Life (BDFL).
-The project governance is available in the [project governance page](governance.html).
+The project governance is available in the [project governance page]({{ base_url }}governance.html).
## Workgroups
diff --git a/web/pandas/community/blog/index.html b/web/pandas/community/blog/index.html
index 627aaa450893b..154d9cccdb7dc 100644
--- a/web/pandas/community/blog/index.html
+++ b/web/pandas/community/blog/index.html
@@ -4,10 +4,10 @@
{% for post in blog.posts %}
<div class="card">
<div class="card-body">
- <h5 class="card-title"><a href="{{post.link }}" target="_blank">{{ post.title }}</a></h5>
+ <h5 class="card-title"><a href="{{ base_url }}{{post.link }}" target="_blank">{{ post.title }}</a></h5>
<h6 class="card-subtitle text-muted small mb-4">Source: {{ post.feed }} | Author: {{ post.author }} | Published: {{ post.published.strftime("%b %d, %Y") }}</h6>
<div class="card-text mb-2">{{ post.summary }}</div>
- <a class="card-link small" href="{{post.link }}" target="_blank">Read more</a>
+ <a class="card-link small" href="{{ base_url }}{{post.link }}" target="_blank">Read more</a>
</div>
</div>
{% endfor %}
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index 816eb6ab296c1..3c25d2c9eb2db 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -19,40 +19,40 @@ main:
- fenced_code
- meta
static:
- logo: /static/img/pandas_white.svg
+ logo: static/img/pandas_white.svg
css:
- - /static/css/pandas.css
+ - static/css/pandas.css
navbar:
- name: "About us"
target:
- name: "About pandas"
- target: /about/
+ target: about/
- name: "Project roadmap"
- target: /about/roadmap.html
+ target: about/roadmap.html
- name: "Governance"
- target: /about/governance.html
+ target: about/governance.html
- name: "Team"
- target: /about/team.html
+ target: about/team.html
- name: "Sponsors"
- target: /about/sponsors.html
+ target: about/sponsors.html
- name: "Citing and logo"
- target: /about/citing.html
+ target: about/citing.html
- name: "Getting started"
- target: /getting_started.html
+ target: getting_started.html
- name: "Documentation"
- target: /docs/
+ target: docs/
- name: "Community"
target:
- name: "Blog"
- target: /community/blog/
+ target: community/blog/
- name: "Ask a question (StackOverflow)"
target: https://stackoverflow.com/questions/tagged/pandas
- name: "Code of conduct"
- target: /community/coc.html
+ target: community/coc.html
- name: "Ecosystem"
- target: /community/ecosystem.html
+ target: community/ecosystem.html
- name: "Contribute"
- target: /contribute.html
+ target: contribute.html
blog:
num_posts: 50
posts_path: community/blog
@@ -141,46 +141,46 @@ sponsors:
active:
- name: "NumFOCUS"
url: https://numfocus.org/
- logo: /static/img/partners/numfocus.svg
+ logo: static/img/partners/numfocus.svg
kind: numfocus
- name: "Two Sigma"
url: https://www.twosigma.com/
- logo: /static/img/partners/two_sigma.svg
+ logo: static/img/partners/two_sigma.svg
kind: partner
description: "Jeff Reback"
- name: "Voltron Data"
url: https://voltrondata.com/
- logo: /static/img/partners/voltron_data.svg
+ logo: static/img/partners/voltron_data.svg
kind: partner
description: "Joris Van den Bossche"
- name: "d-fine GmbH"
url: https://www.d-fine.com/en/
- logo: /static/img/partners/dfine.svg
+ logo: static/img/partners/dfine.svg
kind: partner
description: "Patrick Hoefler"
- name: "Quansight"
url: https://quansight.com/
- logo: /static/img/partners/quansight_labs.svg
+ logo: static/img/partners/quansight_labs.svg
kind: partner
description: "Marco Gorelli"
- name: "Nvidia"
url: https://www.nvidia.com
- logo: /static/img/partners/nvidia.svg
+ logo: static/img/partners/nvidia.svg
kind: partner
description: "Matthew Roeschke"
- name: "Tidelift"
url: https://tidelift.com
- logo: /static/img/partners/tidelift.svg
+ logo: static/img/partners/tidelift.svg
kind: regular
description: "<i>pandas</i> is part of the <a href=\"https://tidelift.com/subscription/pkg/pypi-pandas?utm_source=pypi-pandas&utm_medium=referral&utm_campaign=readme\">Tidelift subscription</a>. You can support pandas by becoming a Tidelift subscriber."
- name: "Chan Zuckerberg Initiative"
url: https://chanzuckerberg.com/
- logo: /static/img/partners/czi.svg
+ logo: static/img/partners/czi.svg
kind: regular
description: "<i>pandas</i> is funded by the Essential Open Source Software for Science program of the Chan Zuckerberg Initiative. The funding is used for general maintenance, improve extension types, and a efficient string type."
- name: "Bodo"
url: https://www.bodo.ai/
- logo: /static/img/partners/bodo.svg
+ logo: static/img/partners/bodo.svg
kind: regular
description: "Bodo's parallel computing platform uses pandas API, and Bodo financially supports pandas development to help improve pandas, in particular the pandas API"
inkind: # not included in active so they don't appear in the home page
diff --git a/web/pandas/contribute.md b/web/pandas/contribute.md
index 0163a1c8110b2..258ba149f1849 100644
--- a/web/pandas/contribute.md
+++ b/web/pandas/contribute.md
@@ -3,8 +3,8 @@
_pandas_ is and will always be **free**. To make the development sustainable, we need _pandas_ users, corporate
and individual, to support the development by providing their time and money.
-You can find more information about current developers in the [team page](about/team.html),
-and about current sponsors in the [sponsors page](about/sponsors.html).
+You can find more information about current developers in the [team page]({{ base_url }}about/team.html),
+and about current sponsors in the [sponsors page]({{ base_url }}about/sponsors.html).
<section>
<div class="container mt-5">
@@ -19,9 +19,9 @@ and about current sponsors in the [sponsors page](about/sponsors.html).
pandas depends on companies and institutions using the software to support its development. Hiring
people to work on pandas, or letting existing employees to contribute to the
software. Or sponsoring pandas with funds, so the project can hire people to
- progress on the <a href="about/roadmap.html">pandas roadmap</a>.
+ progress on the <a href="{{ base_url }}about/roadmap.html">pandas roadmap</a>.
</p>
- <p>More information in the <a href="about/sponsors.html">sponsors page</a></p>
+ <p>More information in the <a href="{{ base_url }}about/sponsors.html">sponsors page</a></p>
</div>
<div class="col-md-4">
<span class="fa-stack fa-4x">
@@ -35,7 +35,7 @@ and about current sponsors in the [sponsors page](about/sponsors.html).
to the documentation (including translators) and others. There are tasks for all
levels, including beginners.
</p>
- <p>More information in the <a href="{{ base_url }}/docs/development/index.html">contributing page</a></p>
+ <p>More information in the <a href="{{ base_url }}docs/development/index.html">contributing page</a></p>
</div>
<div class="col-md-4">
<span class="fa-stack fa-4x">
@@ -48,7 +48,7 @@ and about current sponsors in the [sponsors page](about/sponsors.html).
infrastructure, travel expenses for our volunteer contributors to attend
the in-person sprints, or to give small grants to develop features.
</p>
- <p>Make your donation in the <a href="donate.html">donate page</a></p>
+ <p>Make your donation in the <a href="{{ base_url }}donate.html">donate page</a></p>
</div>
</div>
</div>
diff --git a/web/pandas/getting_started.md b/web/pandas/getting_started.md
index cbcc3a35eb44f..cb14e52edad2c 100644
--- a/web/pandas/getting_started.md
+++ b/web/pandas/getting_started.md
@@ -4,7 +4,7 @@
The next steps provides the easiest and recommended way to set up your
environment to use pandas. Other installation options can be found in
-the [advanced installation page]({{ base_url}}/docs/getting_started/install.html).
+the [advanced installation page]({{ base_url}}docs/getting_started/install.html).
1. Download [Anaconda](https://www.anaconda.com/distribution/) for your operating system and
the latest Python version, run the installer, and follow the steps. Please note:
@@ -32,7 +32,7 @@ the [advanced installation page]({{ base_url}}/docs/getting_started/install.html
## Tutorials
-You can learn more about pandas in the [tutorials]({{ base_url }}/docs/getting_started/intro_tutorials/),
+You can learn more about pandas in the [tutorials]({{ base_url }}docs/getting_started/intro_tutorials/),
and more about JupyterLab in the
[JupyterLab documentation](https://jupyterlab.readthedocs.io/en/stable/user/interface.html).
@@ -42,7 +42,7 @@ The book we recommend to learn pandas is [Python for Data Analysis](https://amzn
by [Wes McKinney](https://wesmckinney.com/), creator of pandas.
<a href="https://amzn.to/3DyLaJc">
- <img alt="Python for Data Analysis" src="{{ base_url }}/static/img/pydata_book.gif"/>
+ <img alt="Python for Data Analysis" src="{{ base_url }}static/img/pydata_book.gif"/>
</a>
## Videos
diff --git a/web/pandas/index.html b/web/pandas/index.html
index ce1e1e1a8f561..61f62e9472859 100644
--- a/web/pandas/index.html
+++ b/web/pandas/index.html
@@ -10,7 +10,7 @@ <h1>pandas</h1>
built on top of the <a href="https://www.python.org">Python</a> programming language.
</p>
<p>
- <a class="btn btn-primary" href="{{ base_url }}/getting_started.html">Install pandas now!</a>
+ <a class="btn btn-primary" href="{{ base_url }}getting_started.html">Install pandas now!</a>
</p>
</section>
@@ -19,25 +19,25 @@ <h1>pandas</h1>
<h5>Getting started</h5>
<ul>
<!-- <li><a href="{{ base_url }}/try.html">Try pandas online</a></li> -->
- <li><a href="{{ base_url }}/getting_started.html">Install pandas</a></li>
- <li><a href="{{ base_url }}/docs/getting_started/index.html">Getting started</a></li>
+ <li><a href="{{ base_url }}getting_started.html">Install pandas</a></li>
+ <li><a href="{{ base_url }}docs/getting_started/index.html">Getting started</a></li>
</ul>
</div>
<div class="col-md-4">
<h5>Documentation</h5>
<ul>
- <li><a href="{{ base_url }}/docs/user_guide/index.html">User guide</a></li>
- <li><a href="{{ base_url }}/docs/reference/index.html">API reference</a></li>
- <li><a href="{{ base_url }}/docs/development/index.html">Contributing to pandas</a></li>
- <li><a href="{{ base_url }}/docs/whatsnew/index.html">Release notes</a></li>
+ <li><a href="{{ base_url }}docs/user_guide/index.html">User guide</a></li>
+ <li><a href="{{ base_url }}docs/reference/index.html">API reference</a></li>
+ <li><a href="{{ base_url }}docs/development/index.html">Contributing to pandas</a></li>
+ <li><a href="{{ base_url }}docs/whatsnew/index.html">Release notes</a></li>
</ul>
</div>
<div class="col-md-4">
<h5>Community</h5>
<ul>
- <li><a href="{{ base_url }}/about/index.html">About pandas</a></li>
+ <li><a href="{{ base_url }}about/index.html">About pandas</a></li>
<li><a href="https://stackoverflow.com/questions/tagged/pandas">Ask a question</a></li>
- <li><a href="{{ base_url }}/community/ecosystem.html">Ecosystem</a></li>
+ <li><a href="{{ base_url }}community/ecosystem.html">Ecosystem</a></li>
</ul>
</div>
</div>
@@ -56,16 +56,16 @@ <h5>With the support of:</h5>
{% endfor %}
</div>
{% endfor %}
- <p class="mt-4">The full list of companies supporting <i>pandas</i> is available in the <a href="{{ base_url }}/about/sponsors.html">sponsors page</a>.
+ <p class="mt-4">The full list of companies supporting <i>pandas</i> is available in the <a href="{{ base_url }}about/sponsors.html">sponsors page</a>.
</section>
</div>
<div class="col-md-3">
{% if releases %}
<h4>Latest version: {{ releases[0].name }}</h4>
<ul>
- <li><a href="docs/whatsnew/v{{ releases[0].name }}.html">What's new in {{ releases[0].name }}</a></li>
+ <li><a href="{{ base_url }}docs/whatsnew/v{{ releases[0].name }}.html">What's new in {{ releases[0].name }}</a></li>
<li>Release date:<br/>{{ releases[0].published.strftime("%b %d, %Y") }}</li>
- <li><a href="{{ base_url}}/docs/">Documentation (web)</a></li>
+ <li><a href="{{ base_url}}docs/">Documentation (web)</a></li>
<li><a href="{{ releases[0].url }}">Download source code</a></li>
</ul>
{% endif %}
@@ -88,7 +88,7 @@ <h4>Follow us</h4>
<h4>Get the book</h4>
<p class="book">
<a href="https://amzn.to/3DyLaJc">
- <img class="img-fluid" alt="Python for Data Analysis" src="{{ base_url }}/static/img/pydata_book.gif"/>
+ <img class="img-fluid" alt="Python for Data Analysis" src="{{ base_url }}static/img/pydata_book.gif"/>
</a>
</p>
{% if releases[1:5] %}
diff --git a/web/pandas/try.md b/web/pandas/try.md
deleted file mode 100644
index 20e119759df6f..0000000000000
--- a/web/pandas/try.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Try pandas online
-
-<section>
- <pre data-executable>
-import pandas
-fibonacci = pandas.Series([1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144])
-fibonacci.sum()
- </pre>
- <script src="https://combinatronics.com/ines/juniper/v0.1.0/dist/juniper.min.js"></script>
- <script>new Juniper({ repo: 'datapythonista/pandas-web' })</script>
-</section>
-
-## Interactive tutorials
-
-You can also try _pandas_ on [Binder](https://mybinder.org/) for one of the next topics:
-
-- Exploratory analysis of US presidents
-- Preprocessing the Titanic dataset to train a machine learning model
-- Forecasting the stock market
-
-_(links will be added soon)_
diff --git a/web/pandas_web.py b/web/pandas_web.py
index 8c508a15f9a2b..e4ffa2cde7cc9 100755
--- a/web/pandas_web.py
+++ b/web/pandas_web.py
@@ -278,7 +278,7 @@ def roadmap_pdeps(context):
context["pdeps"][status].append(
{
"title": title,
- "url": f"/pdeps/{html_file}",
+ "url": f"pdeps/{html_file}",
}
)
@@ -383,7 +383,6 @@ def extend_base_template(content: str, base_template: str) -> str:
def main(
source_path: str,
target_path: str,
- base_url: str,
) -> int:
"""
Copy every file in the source directory to the target directory.
@@ -397,7 +396,7 @@ def main(
os.makedirs(target_path, exist_ok=True)
sys.stderr.write("Generating context...\n")
- context = get_context(config_fname, base_url=base_url, target_path=target_path)
+ context = get_context(config_fname, target_path=target_path)
sys.stderr.write("Context generated\n")
templates_path = os.path.join(source_path, context["main"]["templates_path"])
@@ -420,6 +419,7 @@ def main(
content, extensions=context["main"]["markdown_extensions"]
)
content = extend_base_template(body, context["main"]["base_template"])
+ context["base_url"] = "".join(["../"] * os.path.normpath(fname).count("/"))
content = jinja_env.from_string(content).render(**context)
fname = os.path.splitext(fname)[0] + ".html"
with open(os.path.join(target_path, fname), "w") as f:
@@ -438,8 +438,5 @@ def main(
parser.add_argument(
"--target-path", default="build", help="directory where to write the output"
)
- parser.add_argument(
- "--base-url", default="", help="base url where the website is served from"
- )
args = parser.parse_args()
- sys.exit(main(args.source_path, args.target_path, args.base_url))
+ sys.exit(main(args.source_path, args.target_path))
| I have a first version implemented of the system that will allow us to preview the rendered web/docs of a PR without having to download the artifact (see #50832).
In order to be able to navigate the website in a relative path (i.e. https://pandas.pydata.org/preview/XXXXX/ instead of the root https://pandas.pydata.org/) we need the changes implemented here, that make all the links relative, so for example the a link from `/preview/XXXXX/donate.html` would go to `/preview/XXXXX/getting_started.html`, instead of simply `/getting_started.html` like we've got now. | https://api.github.com/repos/pandas-dev/pandas/pulls/50897 | 2023-01-20T16:16:49Z | 2023-01-26T16:53:09Z | 2023-01-26T16:53:09Z | 2023-01-26T16:58:03Z |
DOC: Fix some RT02 issues in docstrings | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 0aee8cd546d47..b22f19faff9da 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -79,23 +79,8 @@ fi
### DOCSTRINGS ###
if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
- MSG='Validate docstrings (EX04, GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT04, RT05, SA02, SA03, SA04, SS01, SS02, SS03, SS04, SS05, SS06)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT04,RT05,SA02,SA03,SA04,SS01,SS02,SS03,SS04,SS05,SS06
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Partially validate docstrings (RT02)' ; echo $MSG
- $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=RT02 --ignore_functions \
- pandas.Index.all \
- pandas.Index.any \
- pandas.MultiIndex.drop \
- pandas.DatetimeIndex.to_pydatetime \
- pandas.TimedeltaIndex.to_pytimedelta \
- pandas.io.formats.style.Styler.export \
- pandas.api.extensions.ExtensionArray.astype \
- pandas.api.extensions.ExtensionArray.dropna \
- pandas.api.extensions.ExtensionArray.isna \
- pandas.api.extensions.ExtensionArray.repeat \
- pandas.api.extensions.ExtensionArray.unique
+ MSG='Validate docstrings (EX04, GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT02, RT04, RT05, SA02, SA03, SA04, SS01, SS02, SS03, SS04, SS05, SS06)' ; echo $MSG
+ $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT02,RT04,RT05,SA02,SA03,SA04,SS01,SS02,SS03,SS04,SS05,SS06
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 4497583f60d71..c261a41e1e77e 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -566,7 +566,7 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
Returns
-------
- array : np.ndarray or ExtensionArray
+ np.ndarray or pandas.api.extensions.ExtensionArray
An ExtensionArray if dtype is ExtensionDtype,
Otherwise a NumPy ndarray with 'dtype' for its dtype.
"""
@@ -600,7 +600,7 @@ def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll:
Returns
-------
- na_values : Union[np.ndarray, ExtensionArray]
+ numpy.ndarray or pandas.api.extensions.ExtensionArray
In most cases, this should return a NumPy ndarray. For
exceptional cases like ``SparseArray``, where returning
an ndarray would be expensive, an ExtensionArray may be
@@ -819,7 +819,7 @@ def dropna(self: ExtensionArrayT) -> ExtensionArrayT:
Returns
-------
- valid : ExtensionArray
+ pandas.api.extensions.ExtensionArray
"""
# error: Unsupported operand type for ~ ("ExtensionArray")
return self[~self.isna()] # type: ignore[operator]
@@ -880,7 +880,7 @@ def unique(self: ExtensionArrayT) -> ExtensionArrayT:
Returns
-------
- uniques : ExtensionArray
+ pandas.api.extensions.ExtensionArray
"""
uniques = unique(self.astype(object))
return self._from_sequence(uniques, dtype=self.dtype)
@@ -1088,7 +1088,7 @@ def factorize(
Returns
-------
- repeated_array : %(klass)s
+ %(klass)s
Newly created %(klass)s with repeated elements.
See Also
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 53d02e92946d9..d570a8822649a 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -1067,7 +1067,7 @@ def to_pydatetime(self) -> npt.NDArray[np.object_]:
Returns
-------
- datetimes : ndarray[object]
+ numpy.ndarray
"""
return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 1371472af34f8..1a2b9728f80a1 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -781,7 +781,7 @@ def to_pytimedelta(self) -> npt.NDArray[np.object_]:
Returns
-------
- timedeltas : ndarray[object]
+ numpy.ndarray
"""
return ints_to_pytimedelta(self._ndarray)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index bc8087db7b49b..4d2e4758817be 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -6680,7 +6680,7 @@ def any(self, *args, **kwargs):
Returns
-------
- any : bool or array-like (if axis is specified)
+ bool or array-like (if axis is specified)
A single element array-like may be converted to bool.
See Also
@@ -6724,7 +6724,7 @@ def all(self, *args, **kwargs):
Returns
-------
- all : bool or array-like (if axis is specified)
+ bool or array-like (if axis is specified)
A single element array-like may be converted to bool.
See Also
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index f7fb6799d2376..d7fba354ceead 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2164,7 +2164,7 @@ def drop( # type: ignore[override]
Returns
-------
- dropped : MultiIndex
+ MultiIndex
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index a9b3816b41774..a232d1dd77da5 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -1479,7 +1479,7 @@ def repeat(self, repeats):
Returns
-------
- Series or Index of object
+ Series or pandas.Index
Series or Index of repeated string objects specified by
input parameter repeats.
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index dd361809e197c..442f2ab72a1e2 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -2035,7 +2035,7 @@ def export(self) -> dict[str, Any]:
Returns
-------
- styles : dict
+ dict
See Also
--------
| - [ ] closes #49968 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Towards #49968 | https://api.github.com/repos/pandas-dev/pandas/pulls/50896 | 2023-01-20T14:47:18Z | 2023-01-20T17:30:40Z | 2023-01-20T17:30:40Z | 2023-01-20T18:27:22Z |
WEB: fix list formatting in PDEP-4 | diff --git a/web/pandas/pdeps/0004-consistent-to-datetime-parsing.md b/web/pandas/pdeps/0004-consistent-to-datetime-parsing.md
index 10dc4486b90e9..7635fabe2dbc6 100644
--- a/web/pandas/pdeps/0004-consistent-to-datetime-parsing.md
+++ b/web/pandas/pdeps/0004-consistent-to-datetime-parsing.md
@@ -9,6 +9,7 @@
## Abstract
The suggestion is that:
+
- ``to_datetime`` becomes strict and uses the same datetime format to parse all elements in its input.
The format will either be inferred from the first non-NaN element (if `format` is not provided by the user), or from
`format`;
@@ -42,6 +43,7 @@ Out[2]: DatetimeIndex(['2000-12-01', '2000-01-13'], dtype='datetime64[ns]', freq
## Detailed Description
Concretely, the suggestion is:
+
- if no ``format`` is specified, ``pandas`` will guess the format from the first non-NaN row
and parse the rest of the input according to that format. Errors will be handled
according to the ``errors`` argument - there will be no silent switching of format;
@@ -66,6 +68,7 @@ dtype: datetime64[ns]
## Usage and Impact
My expectation is that the impact would be a net-positive:
+
- potentially severe bugs in people's code will be caught early;
- users who actually want mixed formats can still parse them, but now they'd be forced to be
very explicit about it;
@@ -80,6 +83,7 @@ The whatsnew notes read
> In the next major version release, 2.0, several larger API changes are being considered without a formal deprecation.
I'd suggest making this change as part of the above, because:
+
- it would only help prevent bugs, not introduce any;
- given the severity of bugs that can result from the current behaviour, waiting another 2 years until pandas 3.0.0
would potentially cause a lot of damage.
| The list points are currently not rendered correctly: http://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html | https://api.github.com/repos/pandas-dev/pandas/pulls/50895 | 2023-01-20T14:03:47Z | 2023-01-20T17:03:15Z | 2023-01-20T17:03:15Z | 2023-01-20T17:03:18Z |
CI dont fail import scipy.stats | diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 2d395a7cbd608..61b66f38f6c98 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -1607,7 +1607,12 @@ def test_reduction_axis_none_returns_scalar(method):
result = getattr(df, method)(axis=None)
np_arr = df.to_numpy()
if method in {"skew", "kurt"}:
- comp_mod = pytest.importorskip("scipy.stats")
+ try:
+ comp_mod = pytest.importorskip("scipy.stats")
+ except AttributeError:
+ # TODO remove this once the following is addressed:
+ # https://github.com/scipy/scipy/issues/17811
+ return
if method == "kurt":
method = "kurtosis"
expected = getattr(comp_mod, method)(np_arr, bias=False, axis=None)
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index 5bc55ee789fe6..7273a3ce85c9a 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -77,6 +77,13 @@ def safe_import(mod_name: str, min_version: str | None = None):
return False
else:
raise
+ except AttributeError:
+ # TODO remove this once the following is addressed:
+ # https://github.com/scipy/scipy/issues/17811
+ if mod_name == "scipy.stats":
+ return False
+ else:
+ raise
if not min_version:
return mod
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50892 | 2023-01-20T12:27:30Z | 2023-01-20T17:50:04Z | null | 2023-01-20T17:50:04Z |
API: Timestamp and Timedelta .value changing in 2.0 | diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 2565d7998c9c7..2a6c26d595548 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -517,14 +517,16 @@ cdef class DatetimeEngine(Int64Engine):
# NB: caller is responsible for ensuring tzawareness compat
# before we get here
if scalar is NaT:
- return NaT.value
+ return NaT._value
elif isinstance(scalar, _Timestamp):
if scalar._creso == self._creso:
- return scalar.value
+ return scalar._value
else:
# Note: caller is responsible for catching potential ValueError
# from _as_creso
- return (<_Timestamp>scalar)._as_creso(self._creso, round_ok=False).value
+ return (
+ (<_Timestamp>scalar)._as_creso(self._creso, round_ok=False)._value
+ )
raise TypeError(scalar)
def __contains__(self, val: object) -> bool:
@@ -585,14 +587,16 @@ cdef class TimedeltaEngine(DatetimeEngine):
cdef int64_t _unbox_scalar(self, scalar) except? -1:
if scalar is NaT:
- return NaT.value
+ return NaT._value
elif isinstance(scalar, _Timedelta):
if scalar._creso == self._creso:
- return scalar.value
+ return scalar._value
else:
# Note: caller is responsible for catching potential ValueError
# from _as_creso
- return (<_Timedelta>scalar)._as_creso(self._creso, round_ok=False).value
+ return (
+ (<_Timedelta>scalar)._as_creso(self._creso, round_ok=False)._value
+ )
raise TypeError(scalar)
@@ -600,7 +604,7 @@ cdef class PeriodEngine(Int64Engine):
cdef int64_t _unbox_scalar(self, scalar) except? -1:
if scalar is NaT:
- return scalar.value
+ return scalar._value
if is_period_object(scalar):
# NB: we assume that we have the correct freq here.
return scalar.ordinal
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 13b96f9f8fccd..e892b50515327 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -1306,9 +1306,9 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
castfunc(dataptr, &nanosecVal, 1, NULL, NULL);
} else if (PyDate_Check(item) || PyDelta_Check(item)) {
is_datetimelike = 1;
- if (PyObject_HasAttrString(item, "value")) {
+ if (PyObject_HasAttrString(item, "_value")) {
// see test_date_index_and_values for case with non-nano
- nanosecVal = get_long_attr(item, "value");
+ nanosecVal = get_long_attr(item, "_value");
} else {
if (PyDelta_Check(item)) {
nanosecVal = total_seconds(item) *
@@ -1554,8 +1554,8 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
}
return;
} else if (PyDelta_Check(obj)) {
- if (PyObject_HasAttrString(obj, "value")) {
- value = get_long_attr(obj, "value");
+ if (PyObject_HasAttrString(obj, "_value")) {
+ value = get_long_attr(obj, "_value");
} else {
value = total_seconds(obj) * 1000000000LL; // nanoseconds per sec
}
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 89a676f51dc47..19dd7aabe6b8e 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -705,7 +705,7 @@ def array_to_datetime_with_tz(ndarray values, tzinfo tz):
# datetime64, tznaive pydatetime, int, float
ts = ts.tz_localize(tz)
ts = ts.as_unit("ns")
- ival = ts.value
+ ival = ts._value
# Analogous to: result[i] = ival
(<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = ival
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 933e10a35118c..2c70d1681b051 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -765,7 +765,7 @@ cdef int64_t parse_pydatetime(
result = _ts.value
else:
if isinstance(val, _Timestamp):
- result = val.as_unit("ns").value
+ result = val.as_unit("ns")._value
else:
result = pydatetime_to_dt64(val, dts)
check_dts_bounds(dts)
diff --git a/pandas/_libs/tslibs/nattype.pxd b/pandas/_libs/tslibs/nattype.pxd
index e878fa7629f25..32705aa633135 100644
--- a/pandas/_libs/tslibs/nattype.pxd
+++ b/pandas/_libs/tslibs/nattype.pxd
@@ -8,7 +8,7 @@ cdef set c_nat_strings
cdef class _NaT(datetime):
cdef readonly:
- int64_t value
+ int64_t _value
cdef _NaT c_NaT
diff --git a/pandas/_libs/tslibs/nattype.pyi b/pandas/_libs/tslibs/nattype.pyi
index 72f55bb50895a..04f8943710416 100644
--- a/pandas/_libs/tslibs/nattype.pyi
+++ b/pandas/_libs/tslibs/nattype.pyi
@@ -18,7 +18,9 @@ class _NatComparison:
def __call__(self, other: _NaTComparisonTypes) -> bool: ...
class NaTType:
- value: np.int64
+ _value: np.int64
+ @property
+ def value(self) -> int: ...
@property
def asm8(self) -> np.datetime64: ...
def to_datetime64(self) -> np.datetime64: ...
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 9407f57a282bf..d9d8ce3bb16d1 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -357,10 +357,14 @@ class NaTType(_NaT):
cdef _NaT base
base = _NaT.__new__(cls, 1, 1, 1)
- base.value = NPY_NAT
+ base._value= NPY_NAT
return base
+ @property
+ def value(self) -> int:
+ return self._value
+
def __reduce_ex__(self, protocol):
# python 3.6 compat
# https://bugs.python.org/issue28730
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 79332f8ede936..9ad95651cb3dc 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -183,7 +183,7 @@ def apply_wraps(func):
res = result.tz_localize(None)
else:
res = result
- value = res.as_unit("ns").value
+ value = res.as_unit("ns")._value
result = Timestamp(value + nano)
if tz is not None and result.tzinfo is None:
@@ -3469,7 +3469,7 @@ cdef class FY5253Quarter(FY5253Mixin):
else:
tdelta = Timedelta(0)
- # Note: we always have tdelta.value >= 0
+ # Note: we always have tdelta._value>= 0
return start, num_qtrs, tdelta
@apply_wraps
@@ -3481,7 +3481,7 @@ cdef class FY5253Quarter(FY5253Mixin):
prev_year_end, num_qtrs, tdelta = self._rollback_to_year(other)
res = prev_year_end
n += num_qtrs
- if self.n <= 0 and tdelta.value > 0:
+ if self.n <= 0 and tdelta._value > 0:
n += 1
# Possible speedup by handling years first.
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index 3ca87f8680b53..bb06c65597987 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -113,14 +113,14 @@ cdef bint parse_today_now(str val, int64_t* iresult, bint utc):
# microsecond resolution
if val == "now":
if utc:
- iresult[0] = Timestamp.utcnow().value * 1000
+ iresult[0] = Timestamp.utcnow()._value * 1000
else:
# GH#18705 make sure to_datetime("now") matches Timestamp("now")
# Note using Timestamp.now() is faster than Timestamp("now")
- iresult[0] = Timestamp.now().value * 1000
+ iresult[0] = Timestamp.now()._value * 1000
return True
elif val == "today":
- iresult[0] = Timestamp.today().value * 1000
+ iresult[0] = Timestamp.today()._value * 1000
return True
return False
@@ -284,7 +284,7 @@ def array_strptime(
utc,
)
if isinstance(val, _Timestamp):
- iresult[i] = val.tz_localize(None).as_unit("ns").value
+ iresult[i] = val.tz_localize(None).as_unit("ns")._value
else:
iresult[i] = pydatetime_to_dt64(val.replace(tzinfo=None), &dts)
check_dts_bounds(&dts)
diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd
index 8a354c4cb631f..fb6e29a8932a1 100644
--- a/pandas/_libs/tslibs/timedeltas.pxd
+++ b/pandas/_libs/tslibs/timedeltas.pxd
@@ -14,7 +14,7 @@ cdef bint is_any_td_scalar(object obj)
cdef class _Timedelta(timedelta):
cdef readonly:
- int64_t value # nanoseconds
+ int64_t _value # nanoseconds
bint _is_populated # are my components populated
int64_t _d, _h, _m, _s, _ms, _us, _ns
NPY_DATETIMEUNIT _creso
diff --git a/pandas/_libs/tslibs/timedeltas.pyi b/pandas/_libs/tslibs/timedeltas.pyi
index c9904e4592329..d67a330e0b0c2 100644
--- a/pandas/_libs/tslibs/timedeltas.pyi
+++ b/pandas/_libs/tslibs/timedeltas.pyi
@@ -89,6 +89,7 @@ class Timedelta(timedelta):
max: ClassVar[Timedelta]
resolution: ClassVar[Timedelta]
value: int # np.int64
+ _value: int # np.int64
# error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]")
def __new__( # type: ignore[misc]
cls: type[_S],
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index d7352f80132a2..5ecce54e01db6 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -243,7 +243,7 @@ cpdef int64_t delta_to_nanoseconds(
in_reso = delta._creso
elif isinstance(delta, _Timedelta):
- n = delta.value
+ n = delta._value
in_reso = delta._creso
elif is_timedelta64_object(delta):
@@ -342,7 +342,7 @@ cdef convert_to_timedelta64(object ts, str unit):
if ts._creso != NPY_FR_ns:
ts = ts.as_unit("ns").asm8
else:
- ts = np.timedelta64(ts.value, "ns")
+ ts = np.timedelta64(ts._value, "ns")
elif is_timedelta64_object(ts):
ts = ensure_td64ns(ts)
elif is_integer_object(ts):
@@ -737,7 +737,7 @@ cdef bint _validate_ops_compat(other):
def _op_unary_method(func, name):
def f(self):
- new_value = func(self.value)
+ new_value = func(self._value)
return _timedelta_from_value_and_reso(Timedelta, new_value, self._creso)
f.__name__ = name
return f
@@ -795,7 +795,7 @@ def _binary_op_method_timedeltalike(op, name):
elif self._creso > other._creso:
other = (<_Timedelta>other)._as_creso(self._creso, round_ok=True)
- res = op(self.value, other.value)
+ res = op(self._value, other._value)
if res == NPY_NAT:
# e.g. test_implementation_limits
# TODO: more generally could do an overflowcheck in op?
@@ -965,7 +965,7 @@ cdef _timedelta_from_value_and_reso(cls, int64_t value, NPY_DATETIMEUNIT reso):
"Only resolutions 's', 'ms', 'us', 'ns' are supported."
)
- td_base.value = value
+ td_base._value = value
td_base._is_populated = 0
td_base._creso = reso
return td_base
@@ -1016,6 +1016,17 @@ cdef class _Timedelta(timedelta):
max = MinMaxReso("max")
resolution = MinMaxReso("resolution")
+ @property
+ def value(self):
+ try:
+ return convert_reso(self._value, self._creso, NPY_FR_ns, False)
+ except OverflowError:
+ raise OverflowError(
+ "Cannot convert Timedelta to nanoseconds without overflow. "
+ "Use `.asm8.view('i8')` to represent Timedelta in its own "
+ f"unit (here, {self.unit})."
+ )
+
@property
def _unit(self) -> str:
"""
@@ -1089,7 +1100,7 @@ cdef class _Timedelta(timedelta):
# if td1 and td2 have different _resos. timedelta64 also has this
# non-invariant behavior.
# see GH#44504
- return hash(self.value)
+ return hash(self._value)
elif self._is_in_pytimedelta_bounds() and (
self._creso == NPY_FR_ns or self._creso == NPY_DATETIMEUNIT.NPY_FR_us
):
@@ -1108,7 +1119,7 @@ cdef class _Timedelta(timedelta):
obj = (<_Timedelta>self)._as_creso(<NPY_DATETIMEUNIT>(self._creso + 1))
except OverflowError:
# Doesn't fit, so we're off the hook
- return hash(self.value)
+ return hash(self._value)
else:
return hash(obj)
@@ -1144,7 +1155,7 @@ cdef class _Timedelta(timedelta):
return NotImplemented
if self._creso == ots._creso:
- return cmp_scalar(self.value, ots.value, op)
+ return cmp_scalar(self._value, ots._value, op)
return self._compare_mismatched_resos(ots, op)
# TODO: re-use/share with Timestamp
@@ -1155,13 +1166,13 @@ cdef class _Timedelta(timedelta):
npy_datetimestruct dts_other
# dispatch to the datetimestruct utils instead of writing new ones!
- pandas_datetime_to_datetimestruct(self.value, self._creso, &dts_self)
- pandas_datetime_to_datetimestruct(other.value, other._creso, &dts_other)
+ pandas_datetime_to_datetimestruct(self._value, self._creso, &dts_self)
+ pandas_datetime_to_datetimestruct(other._value, other._creso, &dts_other)
return cmp_dtstructs(&dts_self, &dts_other, op)
cdef bint _has_ns(self):
if self._creso == NPY_FR_ns:
- return self.value % 1000 != 0
+ return self._value % 1000 != 0
elif self._creso < NPY_FR_ns:
# i.e. seconds, millisecond, microsecond
return False
@@ -1185,7 +1196,7 @@ cdef class _Timedelta(timedelta):
cdef:
pandas_timedeltastruct tds
- pandas_timedelta_to_timedeltastruct(self.value, self._creso, &tds)
+ pandas_timedelta_to_timedeltastruct(self._value, self._creso, &tds)
self._d = tds.days
self._h = tds.hrs
self._m = tds.min
@@ -1218,7 +1229,7 @@ cdef class _Timedelta(timedelta):
Any nanosecond resolution will be lost.
"""
if self._creso == NPY_FR_ns:
- return timedelta(microseconds=int(self.value) / 1000)
+ return timedelta(microseconds=int(self._value) / 1000)
# TODO(@WillAyd): is this the right way to use components?
self._ensure_components()
@@ -1234,7 +1245,7 @@ cdef class _Timedelta(timedelta):
str abbrev = npy_unit_to_abbrev(self._creso)
# TODO: way to create a np.timedelta64 obj with the reso directly
# instead of having to get the abbrev?
- return np.timedelta64(self.value, abbrev)
+ return np.timedelta64(self._value, abbrev)
def to_numpy(self, dtype=None, copy=False) -> np.timedelta64:
"""
@@ -1267,7 +1278,7 @@ cdef class _Timedelta(timedelta):
dtype : str or dtype
The dtype to view the underlying data as.
"""
- return np.timedelta64(self.value).view(dtype)
+ return np.timedelta64(self._value).view(dtype)
@property
def components(self):
@@ -1466,7 +1477,7 @@ cdef class _Timedelta(timedelta):
return self._repr_base(format="long")
def __bool__(self) -> bool:
- return self.value != 0
+ return self._value != 0
def isoformat(self) -> str:
"""
@@ -1552,7 +1563,7 @@ cdef class _Timedelta(timedelta):
return self
try:
- value = convert_reso(self.value, self._creso, reso, round_ok=round_ok)
+ value = convert_reso(self._value, self._creso, reso, round_ok=round_ok)
except OverflowError as err:
unit = npy_unit_to_abbrev(reso)
raise OutOfBoundsTimedelta(
@@ -1767,11 +1778,11 @@ class Timedelta(_Timedelta):
reso = NPY_FR_ns
else:
value, reso = state
- self.value = value
+ self._value = value
self._creso = reso
def __reduce__(self):
- object_state = self.value, self._creso
+ object_state = self._value, self._creso
return (_timedelta_unpickle, object_state)
@cython.cdivision(True)
@@ -1785,7 +1796,7 @@ class Timedelta(_Timedelta):
to_offset(freq).nanos # raises on non-fixed freq
unit = delta_to_nanoseconds(to_offset(freq), self._creso)
- arr = np.array([self.value], dtype="i8")
+ arr = np.array([self._value], dtype="i8")
result = round_nsint64(arr, mode, unit)[0]
return Timedelta._from_value_and_reso(result, self._creso)
@@ -1851,7 +1862,7 @@ class Timedelta(_Timedelta):
return _timedelta_from_value_and_reso(
Timedelta,
- <int64_t>(other * self.value),
+ <int64_t>(other * self._value),
reso=self._creso,
)
@@ -1874,14 +1885,14 @@ class Timedelta(_Timedelta):
return np.nan
if other._creso != self._creso:
self, other = self._maybe_cast_to_matching_resos(other)
- return self.value / float(other.value)
+ return self._value / float(other._value)
elif is_integer_object(other) or is_float_object(other):
# integers or floats
if util.is_nan(other):
return NaT
return Timedelta._from_value_and_reso(
- <int64_t>(self.value / other), self._creso
+ <int64_t>(self._value / other), self._creso
)
elif is_array(other):
@@ -1901,7 +1912,7 @@ class Timedelta(_Timedelta):
return np.nan
if self._creso != other._creso:
self, other = self._maybe_cast_to_matching_resos(other)
- return float(other.value) / self.value
+ return float(other._value) / self._value
elif is_array(other):
if other.ndim == 0:
@@ -1929,12 +1940,12 @@ class Timedelta(_Timedelta):
return np.nan
if self._creso != other._creso:
self, other = self._maybe_cast_to_matching_resos(other)
- return self.value // other.value
+ return self._value // other._value
elif is_integer_object(other) or is_float_object(other):
if util.is_nan(other):
return NaT
- return type(self)._from_value_and_reso(self.value // other, self._creso)
+ return type(self)._from_value_and_reso(self._value // other, self._creso)
elif is_array(other):
if other.ndim == 0:
@@ -1978,7 +1989,7 @@ class Timedelta(_Timedelta):
return np.nan
if self._creso != other._creso:
self, other = self._maybe_cast_to_matching_resos(other)
- return other.value // self.value
+ return other._value // self._value
elif is_array(other):
if other.ndim == 0:
diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd
index 1b87d2ba4eb25..26018cd904249 100644
--- a/pandas/_libs/tslibs/timestamps.pxd
+++ b/pandas/_libs/tslibs/timestamps.pxd
@@ -21,7 +21,7 @@ cdef _Timestamp create_timestamp_from_ts(int64_t value,
cdef class _Timestamp(ABCTimestamp):
cdef readonly:
- int64_t value, nanosecond, year
+ int64_t _value, nanosecond, year
NPY_DATETIMEUNIT _creso
cdef bint _get_start_end_field(self, str field, freq)
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index e6a36b69a8d47..26b0c9170aaa0 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -32,7 +32,7 @@ class Timestamp(datetime):
max: ClassVar[Timestamp]
resolution: ClassVar[Timedelta]
- value: int # np.int64
+ _value: int # np.int64
# error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]")
def __new__( # type: ignore[misc]
cls: type[_DatetimeT],
@@ -56,6 +56,8 @@ class Timestamp(datetime):
cls, value: int, reso: int, tz: _tzinfo | None
) -> Timestamp: ...
@property
+ def value(self) -> int: ... # np.int64
+ @property
def year(self) -> int: ...
@property
def month(self) -> int: ...
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index fb3adda155254..c4025c7e5efe7 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -154,7 +154,7 @@ cdef _Timestamp create_timestamp_from_ts(
dts.day, dts.hour, dts.min,
dts.sec, dts.us, tz, fold=fold)
- ts_base.value = value
+ ts_base._value = value
ts_base.year = dts.year
ts_base.nanosecond = dts.ps // 1000
ts_base._creso = reso
@@ -232,6 +232,17 @@ cdef class _Timestamp(ABCTimestamp):
max = MinMaxReso("max")
resolution = MinMaxReso("resolution") # GH#21336, GH#21365
+ @property
+ def value(self) -> int:
+ try:
+ return convert_reso(self._value, self._creso, NPY_FR_ns, False)
+ except OverflowError:
+ raise OverflowError(
+ "Cannot convert Timestamp to nanoseconds without overflow. "
+ "Use `.asm8.view('i8')` to represent Timestamp in its own "
+ f"unit (here, {self.unit})."
+ )
+
@property
def unit(self) -> str:
"""
@@ -296,10 +307,10 @@ cdef class _Timestamp(ABCTimestamp):
def __hash__(_Timestamp self):
if self.nanosecond:
- return hash(self.value)
+ return hash(self._value)
if not (1 <= self.year <= 9999):
# out of bounds for pydatetime
- return hash(self.value)
+ return hash(self._value)
if self.fold:
return datetime.__hash__(self.replace(fold=0))
return datetime.__hash__(self)
@@ -371,7 +382,7 @@ cdef class _Timestamp(ABCTimestamp):
"Cannot compare tz-naive and tz-aware timestamps"
)
if self._creso == ots._creso:
- return cmp_scalar(self.value, ots.value, op)
+ return cmp_scalar(self._value, ots._value, op)
return self._compare_mismatched_resos(ots, op)
# TODO: copied from Timedelta; try to de-duplicate
@@ -382,8 +393,8 @@ cdef class _Timestamp(ABCTimestamp):
npy_datetimestruct dts_other
# dispatch to the datetimestruct utils instead of writing new ones!
- pandas_datetime_to_datetimestruct(self.value, self._creso, &dts_self)
- pandas_datetime_to_datetimestruct(other.value, other._creso, &dts_other)
+ pandas_datetime_to_datetimestruct(self._value, self._creso, &dts_self)
+ pandas_datetime_to_datetimestruct(other._value, other._creso, &dts_other)
return cmp_dtstructs(&dts_self, &dts_other, op)
cdef bint _compare_outside_nanorange(_Timestamp self, datetime other,
@@ -428,16 +439,16 @@ cdef class _Timestamp(ABCTimestamp):
elif self._creso > other._creso:
other = (<_Timedelta>other)._as_creso(self._creso, round_ok=True)
- nanos = other.value
+ nanos = other._value
try:
- new_value = self.value + nanos
+ new_value = self._value + nanos
result = type(self)._from_value_and_reso(
new_value, reso=self._creso, tz=self.tzinfo
)
except OverflowError as err:
# TODO: don't hard-code nanosecond here
- new_value = int(self.value) + int(nanos)
+ new_value = int(self._value) + int(nanos)
raise OutOfBoundsDatetime(
f"Out of bounds nanosecond timestamp: {new_value}"
) from err
@@ -519,7 +530,7 @@ cdef class _Timestamp(ABCTimestamp):
# scalar Timestamp/datetime - Timestamp/datetime -> yields a
# Timedelta
try:
- res_value = self.value - other.value
+ res_value = self._value - other._value
return Timedelta._from_value_and_reso(res_value, self._creso)
except (OverflowError, OutOfBoundsDatetime, OutOfBoundsTimedelta) as err:
if isinstance(other, _Timestamp):
@@ -565,7 +576,7 @@ cdef class _Timestamp(ABCTimestamp):
pydatetime_to_dtstruct(self, &dts)
val = npy_datetimestruct_to_datetime(self._creso, &dts) + self.nanosecond
else:
- val = self.value
+ val = self._value
return val
@cython.boundscheck(False)
@@ -945,7 +956,7 @@ cdef class _Timestamp(ABCTimestamp):
return self.__reduce__()
def __setstate__(self, state):
- self.value = state[0]
+ self._value = state[0]
self.tzinfo = state[2]
if len(state) == 3:
@@ -957,7 +968,7 @@ cdef class _Timestamp(ABCTimestamp):
self._creso = reso
def __reduce__(self):
- object_state = self.value, None, self.tzinfo, self._creso
+ object_state = self._value, None, self.tzinfo, self._creso
return (_unpickle_timestamp, object_state)
# -----------------------------------------------------------------
@@ -1084,7 +1095,7 @@ cdef class _Timestamp(ABCTimestamp):
return self
try:
- value = convert_reso(self.value, self._creso, creso, round_ok=round_ok)
+ value = convert_reso(self._value, self._creso, creso, round_ok=round_ok)
except OverflowError as err:
unit = npy_unit_to_abbrev(creso)
raise OutOfBoundsDatetime(
@@ -1144,7 +1155,7 @@ cdef class _Timestamp(ABCTimestamp):
denom = periods_per_second(self._creso)
- return round(self.value / denom, 6)
+ return round(self._value / denom, 6)
cpdef datetime to_pydatetime(_Timestamp self, bint warn=True):
"""
@@ -1177,7 +1188,7 @@ cdef class _Timestamp(ABCTimestamp):
"""
# TODO: find a way to construct dt64 directly from _reso
abbrev = npy_unit_to_abbrev(self._creso)
- return np.datetime64(self.value, abbrev)
+ return np.datetime64(self._value, abbrev)
def to_numpy(self, dtype=None, copy=False) -> np.datetime64:
"""
@@ -1672,9 +1683,9 @@ class Timestamp(_Timestamp):
# TODO: problem if nanos==0
if self.tz is not None:
- value = self.tz_localize(None).value
+ value = self.tz_localize(None)._value
else:
- value = self.value
+ value = self._value
value = np.array([value], dtype=np.int64)
@@ -2070,13 +2081,13 @@ default 'raise'
tz = maybe_get_tz(tz)
if not isinstance(ambiguous, str):
ambiguous = [ambiguous]
- value = tz_localize_to_utc_single(self.value, tz,
+ value = tz_localize_to_utc_single(self._value, tz,
ambiguous=ambiguous,
nonexistent=nonexistent,
creso=self._creso)
elif tz is None:
# reset tz
- value = tz_convert_from_utc_single(self.value, self.tz, creso=self._creso)
+ value = tz_convert_from_utc_single(self._value, self.tz, creso=self._creso)
else:
raise TypeError(
@@ -2136,7 +2147,7 @@ default 'raise'
else:
# Same UTC timestamp, different time zone
tz = maybe_get_tz(tz)
- out = type(self)._from_value_and_reso(self.value, reso=self._creso, tz=tz)
+ out = type(self)._from_value_and_reso(self._value, reso=self._creso, tz=tz)
return out
astimezone = tz_convert
@@ -2208,7 +2219,7 @@ default 'raise'
# set to naive if needed
tzobj = self.tzinfo
- value = self.value
+ value = self._value
# GH 37610. Preserve fold when replacing.
if fold is None:
diff --git a/pandas/core/arrays/_ranges.py b/pandas/core/arrays/_ranges.py
index baf8470a866ff..c93fc94685358 100644
--- a/pandas/core/arrays/_ranges.py
+++ b/pandas/core/arrays/_ranges.py
@@ -46,8 +46,8 @@ def generate_regular_range(
ndarray[np.int64]
Representing the given resolution.
"""
- istart = start.value if start is not None else None
- iend = end.value if end is not None else None
+ istart = start._value if start is not None else None
+ iend = end._value if end is not None else None
freq.nanos # raises if non-fixed frequency
td = Timedelta(freq)
try:
@@ -59,7 +59,7 @@ def generate_regular_range(
f"freq={freq} is incompatible with unit={unit}. "
"Use a lower freq or a higher unit instead."
) from err
- stride = int(td.value)
+ stride = int(td._value)
if periods is None and istart is not None and iend is not None:
b = istart
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 4b26528e6661c..437195bbcf7e9 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -176,7 +176,7 @@ def new_meth(self, *args, **kwargs):
if result is NaT:
return NaT
elif isinstance(result, Timestamp):
- return self._box_func(result.value)
+ return self._box_func(result._value)
res_i8 = result.view("i8")
return self._from_backing_data(res_i8)
@@ -1003,7 +1003,7 @@ def _get_i8_values_and_mask(
i8values = other.ordinal
mask = None
elif isinstance(other, (Timestamp, Timedelta)):
- i8values = other.value
+ i8values = other._value
mask = None
else:
# PeriodArray, DatetimeArray, TimedeltaArray
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 0766b1c6a5262..3563986a0a280 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -447,7 +447,7 @@ def _generate_range( # type: ignore[override]
xdr = _generate_range(
start=start, end=end, periods=periods, offset=freq, unit=unit
)
- i8values = np.array([x.value for x in xdr], dtype=np.int64)
+ i8values = np.array([x._value for x in xdr], dtype=np.int64)
endpoint_tz = start.tz if start is not None else end.tz
@@ -477,8 +477,8 @@ def _generate_range( # type: ignore[override]
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
i8values = (
- np.linspace(0, end.value - start.value, periods, dtype="int64")
- + start.value
+ np.linspace(0, end._value - start._value, periods, dtype="int64")
+ + start._value
)
if i8values.dtype != "i8":
# 2022-01-09 I (brock) am not sure if it is possible for this
@@ -489,8 +489,8 @@ def _generate_range( # type: ignore[override]
if not left_inclusive and not right_inclusive:
i8values = i8values[1:-1]
else:
- start_i8 = Timestamp(start).value
- end_i8 = Timestamp(end).value
+ start_i8 = Timestamp(start)._value
+ end_i8 = Timestamp(end)._value
if not left_inclusive or not right_inclusive:
if not left_inclusive and len(i8values) and i8values[0] == start_i8:
i8values = i8values[1:]
@@ -509,7 +509,7 @@ def _unbox_scalar(self, value) -> np.datetime64:
raise ValueError("'value' should be a Timestamp.")
self._check_compatible_with(value)
if value is NaT:
- return np.datetime64(value.value, self.unit)
+ return np.datetime64(value._value, self.unit)
else:
return value.as_unit(self.unit).asm8
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index e6682b0dea814..da1c94101b785 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -331,7 +331,7 @@ def _unbox_scalar( # type: ignore[override]
) -> np.int64:
if value is NaT:
# error: Item "Period" of "Union[Period, NaTType]" has no attribute "value"
- return np.int64(value.value) # type: ignore[union-attr]
+ return np.int64(value._value) # type: ignore[union-attr]
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value)
return np.int64(value.ordinal)
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 861c9712cd2ae..4c9c1e234de3f 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -151,7 +151,7 @@ def _scalar_type(self) -> type[Timedelta]:
def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType:
y = x.view("i8")
- if y == NaT.value:
+ if y == NaT._value:
return NaT
return Timedelta._from_value_and_reso(y, reso=self._creso)
@@ -298,7 +298,7 @@ def _generate_range( # type: ignore[override]
if freq is not None:
index = generate_regular_range(start, end, periods, freq, unit=unit)
else:
- index = np.linspace(start.value, end.value, periods).astype("i8")
+ index = np.linspace(start._value, end._value, periods).astype("i8")
if not left_closed:
index = index[1:]
@@ -316,7 +316,7 @@ def _unbox_scalar(self, value) -> np.timedelta64:
raise ValueError("'value' should be a Timedelta.")
self._check_compatible_with(value)
if value is NaT:
- return np.timedelta64(value.value, self.unit)
+ return np.timedelta64(value._value, self.unit)
else:
return value.as_unit(self.unit).asm8
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 04a6f6c6277ee..f7d1314b12da2 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -218,13 +218,13 @@ def stringify(value):
v = Timestamp(v).as_unit("ns")
if v.tz is not None:
v = v.tz_convert("UTC")
- return TermValue(v, v.value, kind)
+ return TermValue(v, v._value, kind)
elif kind in ("timedelta64", "timedelta"):
if isinstance(v, str):
v = Timedelta(v)
else:
v = Timedelta(v, unit="s")
- v = v.as_unit("ns").value
+ v = v.as_unit("ns")._value
return TermValue(int(v), v, kind)
elif meta == "category":
metadata = extract_array(self.metadata, extract_numpy=True)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index cad1fb431dd84..dec691f7335e3 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -486,8 +486,8 @@ def _as_range_index(self) -> RangeIndex:
# Convert our i8 representations to RangeIndex
# Caller is responsible for checking isinstance(self.freq, Tick)
freq = cast(Tick, self.freq)
- tick = freq.delta.value
- rng = range(self[0].value, self[-1].value + tick, tick)
+ tick = freq.delta._value
+ rng = range(self[0]._value, self[-1]._value + tick, tick)
return RangeIndex(rng)
def _can_range_setop(self, other):
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 3b0b5d1d55a88..69dacb043010a 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -546,7 +546,7 @@ def _maybe_convert_i8(self, key):
if lib.is_period(key):
key_i8 = key.ordinal
elif isinstance(key_i8, Timestamp):
- key_i8 = key_i8.value
+ key_i8 = key_i8._value
elif isinstance(key_i8, (np.datetime64, np.timedelta64)):
key_i8 = key_i8.view("i8")
else:
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index d46b51a2ee954..0e1c00b8a97b7 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -524,7 +524,7 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
if isinstance(empty_dtype, DatetimeTZDtype):
# NB: exclude e.g. pyarrow[dt64tz] dtypes
- i8values = np.full(self.shape, fill_value.value)
+ i8values = np.full(self.shape, fill_value._value)
return DatetimeArray(i8values, dtype=empty_dtype)
elif is_1d_only_ea_dtype(empty_dtype):
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 01d5591d3d590..13684d6db3a4c 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -2137,19 +2137,19 @@ def _adjust_dates_anchored(
origin_nanos = 0 # origin == "epoch"
if origin == "start_day":
- origin_nanos = first.normalize().value
+ origin_nanos = first.normalize()._value
elif origin == "start":
- origin_nanos = first.value
+ origin_nanos = first._value
elif isinstance(origin, Timestamp):
- origin_nanos = origin.as_unit("ns").value
+ origin_nanos = origin.as_unit("ns")._value
elif origin in ["end", "end_day"]:
origin_last = last if origin == "end" else last.ceil("D")
- sub_freq_times = (origin_last.value - first.value) // freq.nanos
+ sub_freq_times = (origin_last._value - first._value) // freq.nanos
if closed == "left":
sub_freq_times += 1
first = origin_last - sub_freq_times * freq
- origin_nanos = first.value
- origin_nanos += offset.value if offset else 0
+ origin_nanos = first._value
+ origin_nanos += offset._value if offset else 0
# GH 10117 & GH 19375. If first and last contain timezone information,
# Perform the calculation in UTC in order to avoid localizing on an
@@ -2161,34 +2161,34 @@ def _adjust_dates_anchored(
if last_tzinfo is not None:
last = last.tz_convert("UTC")
- foffset = (first.value - origin_nanos) % freq.nanos
- loffset = (last.value - origin_nanos) % freq.nanos
+ foffset = (first._value - origin_nanos) % freq.nanos
+ loffset = (last._value - origin_nanos) % freq.nanos
if closed == "right":
if foffset > 0:
# roll back
- fresult_int = first.value - foffset
+ fresult_int = first._value - foffset
else:
- fresult_int = first.value - freq.nanos
+ fresult_int = first._value - freq.nanos
if loffset > 0:
# roll forward
- lresult_int = last.value + (freq.nanos - loffset)
+ lresult_int = last._value + (freq.nanos - loffset)
else:
# already the end of the road
- lresult_int = last.value
+ lresult_int = last._value
else: # closed == 'left'
if foffset > 0:
- fresult_int = first.value - foffset
+ fresult_int = first._value - foffset
else:
# start of the road
- fresult_int = first.value
+ fresult_int = first._value
if loffset > 0:
# roll forward
- lresult_int = last.value + (freq.nanos - loffset)
+ lresult_int = last._value + (freq.nanos - loffset)
else:
- lresult_int = last.value + freq.nanos
+ lresult_int = last._value + freq.nanos
fresult = Timestamp(fresult_int)
lresult = Timestamp(lresult_int)
if first_tzinfo is not None:
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 7d8d7a37ff7e7..ad403ae19a274 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -2123,7 +2123,7 @@ def injection(obj):
lvs = ensure_wrapped_if_datetimelike(left_values)
tolerance = tolerance.as_unit(lvs.unit)
- tolerance = tolerance.value
+ tolerance = tolerance._value
# TODO: require left_values.dtype == right_values.dtype, or at least
# comparable for e.g. dt64tz
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 152bfcb8822a4..76e144c70c7d5 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -518,8 +518,8 @@ def _to_datetime_with_unit(arg, unit, name, utc: bool, errors: str) -> Index:
fvalues = (arg * mult).astype("f8", copy=False)
fvalues[mask] = 0
- if (fvalues < Timestamp.min.value).any() or (
- fvalues > Timestamp.max.value
+ if (fvalues < Timestamp.min._value).any() or (
+ fvalues > Timestamp.max._value
).any():
if errors != "raise":
arg = arg.astype(object)
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index c0a7b2b7cc361..b61fa9a92539e 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -117,7 +117,7 @@ def _calculate_deltas(
"""
_times = np.asarray(times.view(np.int64), dtype=np.float64)
# TODO: generalize to non-nano?
- _halflife = float(Timedelta(halflife).as_unit("ns").value)
+ _halflife = float(Timedelta(halflife).as_unit("ns")._value)
return np.diff(_times) / _halflife
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index ceb84161528cc..213acdadfddbc 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -1719,15 +1719,15 @@ def test_datetimeindex_sub_timestamp_overflow(self):
with pytest.raises(OverflowError, match=msg):
dtimax - variant
- expected = Timestamp.max.value - tspos.value
+ expected = Timestamp.max._value - tspos._value
for variant in ts_pos_variants:
res = dtimax - variant
- assert res[1].value == expected
+ assert res[1]._value == expected
- expected = Timestamp.min.value - tsneg.value
+ expected = Timestamp.min._value - tsneg._value
for variant in ts_neg_variants:
res = dtimin - variant
- assert res[1].value == expected
+ assert res[1]._value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
@@ -1742,13 +1742,13 @@ def test_datetimeindex_sub_datetimeindex_overflow(self):
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
- expected = Timestamp.max.value - ts_pos[1].value
+ expected = Timestamp.max._value - ts_pos[1]._value
result = dtimax - ts_pos
- assert result[1].value == expected
+ assert result[1]._value == expected
- expected = Timestamp.min.value - ts_neg[1].value
+ expected = Timestamp.min._value - ts_neg[1]._value
result = dtimin - ts_neg
- assert result[1].value == expected
+ assert result[1]._value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
@@ -2442,4 +2442,4 @@ def test_dt64arr_addsub_object_dtype_2d():
result2 = dta - dta.astype(object)
assert result2.shape == (4, 1)
- assert all(td.value == 0 for td in result2.ravel())
+ assert all(td._value == 0 for td in result2.ravel())
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 0a79822d26e9a..65d39f7c7b9fb 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -690,7 +690,7 @@ def test_tdi_add_overflow(self):
with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):
Timestamp("2000") + pd.to_timedelta(106580, "D")
- _NaT = NaT.value + 1
+ _NaT = NaT._value + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], "D") + Timestamp("2000")
@@ -754,7 +754,7 @@ def test_timedelta_ops_with_missing_values(self):
# supported GH#29794
DataFrame([NaT]).apply(pd.to_timedelta) # TODO: belongs elsewhere?
- dfn = DataFrame([NaT.value]).apply(pd.to_timedelta)
+ dfn = DataFrame([NaT._value]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta("00:00:01")
scalar2 = pd.to_timedelta("00:00:02")
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index f80051fb187ba..362abee1d482a 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -339,7 +339,7 @@ def test_searchsorted_castable_strings(self, arr1d, box, string_storage):
def test_getitem_near_implementation_bounds(self):
# We only check tz-naive for DTA bc the bounds are slightly different
# for other tzs
- i8vals = np.asarray([NaT.value + n for n in range(1, 5)], dtype="i8")
+ i8vals = np.asarray([NaT._value + n for n in range(1, 5)], dtype="i8")
arr = self.array_cls(i8vals, freq="ns")
arr[0] # should not raise OutOfBoundsDatetime
@@ -815,7 +815,7 @@ def test_take_fill_valid(self, arr1d, fixed_now_ts):
# Timestamp with mismatched tz-awareness
arr.take([-1, 1], allow_fill=True, fill_value=now)
- value = NaT.value
+ value = NaT._value
msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
with pytest.raises(TypeError, match=msg):
# require NaT, not iNaT, as it could be confused with an integer
@@ -1038,7 +1038,7 @@ def test_astype_object(self, arr1d):
def test_take_fill_valid(self, arr1d):
arr = arr1d
- value = NaT.value
+ value = NaT._value
msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
with pytest.raises(TypeError, match=msg):
# require NaT, not iNaT, as it could be confused with an integer
@@ -1181,15 +1181,15 @@ def test_casting_nat_setitem_array(arr, casting_nats):
[
(
TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,
- (np.datetime64("NaT", "ns"), NaT.value),
+ (np.datetime64("NaT", "ns"), NaT._value),
),
(
pd.date_range("2000-01-01", periods=3, freq="D")._data,
- (np.timedelta64("NaT", "ns"), NaT.value),
+ (np.timedelta64("NaT", "ns"), NaT._value),
),
(
pd.period_range("2000-01-01", periods=3, freq="D")._data,
- (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), NaT.value),
+ (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), NaT._value),
),
],
ids=lambda x: type(x).__name__,
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 53c9b3d174967..4bd6fd745d56d 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -132,7 +132,7 @@ def test_iter(self, dta):
expected = dta[0]
assert type(res) is pd.Timestamp
- assert res.value == expected.value
+ assert res._value == expected._value
assert res._creso == expected._creso
assert res == expected
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index 4f13858b0c131..68deb0972cebb 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -430,19 +430,6 @@ def test_maybe_promote_any_with_timedelta64(any_numpy_dtype, fill_value, request
expected_dtype = dtype
# for timedelta dtypes, scalar values get cast to pd.Timedelta.value
exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()
-
- if isinstance(fill_value, np.timedelta64) and fill_value.dtype != "m8[ns]":
- mark = pytest.mark.xfail(
- reason="maybe_promote not yet updated to handle non-nano "
- "Timedelta scalar"
- )
- request.node.add_marker(mark)
- elif type(fill_value) is datetime.timedelta:
- mark = pytest.mark.xfail(
- reason="maybe_promote not yet updated to handle non-nano "
- "Timedelta scalar"
- )
- request.node.add_marker(mark)
else:
expected_dtype = np.dtype(object)
exp_val_for_scalar = fill_value
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index 9be0b95472d99..55dca3a3bc619 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -800,8 +800,8 @@ def test_empty_like(self):
int_na_vals = [
# Values that match iNaT, which we treat as null in specific cases
- np.int64(NaT.value),
- int(NaT.value),
+ np.int64(NaT._value),
+ int(NaT._value),
]
sometimes_na_vals = [Decimal("NaN")]
diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py
index 43fcb25d122fb..026c3ae7011a0 100644
--- a/pandas/tests/frame/methods/test_sort_values.py
+++ b/pandas/tests/frame/methods/test_sort_values.py
@@ -355,7 +355,7 @@ def test_sort_values_nat_values_in_int_column(self):
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
- int_values = (2, int(NaT.value))
+ int_values = (2, int(NaT._value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 1fd61e6eb268e..e93dd022f46ac 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -927,7 +927,7 @@ def test_cummax(dtypes_for_minmax):
def test_cummax_i8_at_implementation_bound():
# the minimum value used to be treated as NPY_NAT+1 instead of NPY_NAT
# for int64 dtype GH#46382
- ser = Series([pd.NaT.value + n for n in range(5)])
+ ser = Series([pd.NaT._value + n for n in range(5)])
df = DataFrame({"A": 1, "B": ser, "C": ser.view("M8[ns]")})
gb = df.groupby("A")
diff --git a/pandas/tests/indexes/datetimes/methods/test_astype.py b/pandas/tests/indexes/datetimes/methods/test_astype.py
index 007204fd83bd4..8cf1edf5c7bf3 100644
--- a/pandas/tests/indexes/datetimes/methods/test_astype.py
+++ b/pandas/tests/indexes/datetimes/methods/test_astype.py
@@ -267,7 +267,7 @@ def _check_rng(rng):
)
def test_integer_index_astype_datetime(self, tz, dtype):
# GH 20997, 20964, 24559
- val = [Timestamp("2018-01-01", tz=tz).as_unit("ns").value]
+ val = [Timestamp("2018-01-01", tz=tz).as_unit("ns")._value]
result = Index(val, name="idx").astype(dtype)
expected = DatetimeIndex(["2018-01-01"], tz=tz, name="idx")
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index 1d82d3d9c5b9d..175f435fe9696 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -73,7 +73,7 @@ def test_freq_validation_with_nat(self, dt_cls):
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, Timestamp("2011-01-01")], freq="D")
with pytest.raises(ValueError, match=msg):
- dt_cls([pd.NaT, Timestamp("2011-01-01").value], freq="D")
+ dt_cls([pd.NaT, Timestamp("2011-01-01")._value], freq="D")
# TODO: better place for tests shared by DTI/TDI?
@pytest.mark.parametrize(
@@ -805,7 +805,7 @@ def test_constructor_timestamp_near_dst(self):
def test_constructor_with_int_tz(self, klass, box, tz, dtype):
# GH 20997, 20964
ts = Timestamp("2018-01-01", tz=tz).as_unit("ns")
- result = klass(box([ts.value]), dtype=dtype)
+ result = klass(box([ts._value]), dtype=dtype)
expected = klass([ts])
assert result == expected
@@ -1200,6 +1200,6 @@ def test_timestamp_constructor_adjust_value_for_fold(tz, ts_input, fold, value_o
# Check that we adjust value for fold correctly
# based on timestamps since utc
ts = Timestamp(ts_input, tz=tz, fold=fold)
- result = ts.value
+ result = ts._value
expected = value_out
assert result == expected
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index 8979d99675589..ef909feccfcd3 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -1236,7 +1236,7 @@ def test_date_range_freq_lower_than_endpoints(self):
# but we can losslessly cast to "us"
dti = date_range(start, end, periods=2, unit="us")
rng = np.array(
- [start.as_unit("us").value, end.as_unit("us").value], dtype=np.int64
+ [start.as_unit("us")._value, end.as_unit("us")._value], dtype=np.int64
)
expected = DatetimeIndex(rng.view("M8[us]"))
tm.assert_index_equal(dti, expected)
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 04d1d8204a346..ecdea9ea25c9d 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -175,7 +175,7 @@ def test_where_invalid_dtypes(self):
tm.assert_index_equal(result, expected)
result = dti.where(mask, i2.asi8)
- expected = Index([pd.NaT.value, pd.NaT.value] + tail, dtype=object)
+ expected = Index([pd.NaT._value, pd.NaT._value] + tail, dtype=object)
assert isinstance(expected[0], int)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 65207a4d7a60f..05700841de7e1 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -1138,7 +1138,7 @@ def test_dti_convert_tz_aware_datetime_datetime(self, tz):
assert timezones.tz_compare(result.tz, tz)
converted = to_datetime(dates_aware, utc=True)
- ex_vals = np.array([Timestamp(x).as_unit("ns").value for x in dates_aware])
+ ex_vals = np.array([Timestamp(x).as_unit("ns")._value for x in dates_aware])
tm.assert_numpy_array_equal(converted.asi8, ex_vals)
assert converted.tz is timezone.utc
diff --git a/pandas/tests/indexes/datetimes/test_unique.py b/pandas/tests/indexes/datetimes/test_unique.py
index 68ac770f612e6..c18bd99b67000 100644
--- a/pandas/tests/indexes/datetimes/test_unique.py
+++ b/pandas/tests/indexes/datetimes/test_unique.py
@@ -55,7 +55,7 @@ def test_index_unique(rand_series_with_duplicate_datetimeindex):
def test_index_unique2():
# NaT, note this is excluded
- arr = [1370745748 + t for t in range(20)] + [NaT.value]
+ arr = [1370745748 + t for t in range(20)] + [NaT._value]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index a809af7e975e2..dedc3fdd00e08 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -378,7 +378,7 @@ def test_maybe_convert_i8(self, breaks):
# interval
interval = Interval(breaks[0], breaks[1])
result = index._maybe_convert_i8(interval)
- expected = Interval(breaks[0].value, breaks[1].value)
+ expected = Interval(breaks[0]._value, breaks[1]._value)
assert result == expected
# datetimelike index
@@ -388,7 +388,7 @@ def test_maybe_convert_i8(self, breaks):
# datetimelike scalar
result = index._maybe_convert_i8(breaks[0])
- expected = breaks[0].value
+ expected = breaks[0]._value
assert result == expected
# list-like of datetimelike scalars
@@ -410,7 +410,7 @@ def test_maybe_convert_i8_nat(self, breaks):
tm.assert_index_equal(result, expected)
to_convert = to_convert.insert(0, breaks[0])
- expected = expected.insert(0, float(breaks[0].value))
+ expected = expected.insert(0, float(breaks[0]._value))
result = index._maybe_convert_i8(to_convert)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 2db1e8c72a87c..228fd2829c5d9 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -570,7 +570,7 @@ def test_where_invalid_dtypes(self):
mask = notna(i2)
result = pi.where(mask, i2.asi8)
- expected = pd.Index([NaT.value, NaT.value] + tail, dtype=object)
+ expected = pd.Index([NaT._value, NaT._value] + tail, dtype=object)
assert isinstance(expected[0], int)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/test_engines.py b/pandas/tests/indexes/test_engines.py
index 02d8c5b2a6a22..a4b7ce6822b80 100644
--- a/pandas/tests/indexes/test_engines.py
+++ b/pandas/tests/indexes/test_engines.py
@@ -32,7 +32,7 @@ class TestDatetimeEngine:
"scalar",
[
pd.Timedelta(pd.Timestamp("2016-01-01").asm8.view("m8[ns]")),
- pd.Timestamp("2016-01-01").value,
+ pd.Timestamp("2016-01-01")._value,
pd.Timestamp("2016-01-01").to_pydatetime(),
pd.Timestamp("2016-01-01").to_datetime64(),
],
@@ -58,7 +58,7 @@ class TestTimedeltaEngine:
"scalar",
[
pd.Timestamp(pd.Timedelta(days=42).asm8.view("datetime64[ns]")),
- pd.Timedelta(days=42).value,
+ pd.Timedelta(days=42)._value,
pd.Timedelta(days=42).to_pytimedelta(),
pd.Timedelta(days=42).to_timedelta64(),
],
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index cc166f9f32a34..2d2711520d44f 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -144,7 +144,7 @@ def test_where_invalid_dtypes(self, fixed_now_ts):
i2 = Index([NaT, NaT] + tail)
mask = notna(i2)
- expected = Index([NaT.value, NaT.value] + tail, dtype=object, name="idx")
+ expected = Index([NaT._value, NaT._value] + tail, dtype=object, name="idx")
assert isinstance(expected[0], int)
result = tdi.where(mask, i2.asi8)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index dc2fe85679181..e21bc6a4850a3 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -16,7 +16,7 @@
class TestDatetimeIndex:
def test_get_loc_naive_dti_aware_str_deprecated(self):
# GH#46903
- ts = Timestamp("20130101").value
+ ts = Timestamp("20130101")._value
dti = pd.DatetimeIndex([ts + 50 + i for i in range(100)])
ser = Series(range(100), index=dti)
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 34ecc75769d0b..8aff76c0e1fee 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -2560,9 +2560,9 @@ def test_loc_setitem_mask_td64_series_value(self):
df_copy = df.copy()
ser = Series([td1])
- expected = df["col"].iloc[1].value
+ expected = df["col"].iloc[1]._value
df.loc[[True, False]] = ser
- result = df["col"].iloc[1].value
+ result = df["col"].iloc[1]._value
assert expected == result
tm.assert_frame_equal(df, df_copy)
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index cf69cebd3c05e..f59e1e8cbe43d 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1001,7 +1001,7 @@ def test_mixed_timedelta_datetime(self):
frame = DataFrame({"a": [td, ts]}, dtype=object)
expected = DataFrame(
- {"a": [pd.Timedelta(td).as_unit("ns").value, ts.as_unit("ns").value]}
+ {"a": [pd.Timedelta(td).as_unit("ns")._value, ts.as_unit("ns")._value]}
)
result = read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"})
tm.assert_frame_equal(result, expected, check_index_type=False)
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index a5d7a16f77a72..472b08963425b 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -395,16 +395,16 @@ def test_datetime_units(self):
stamp = Timestamp(val).as_unit("ns")
roundtrip = ujson.decode(ujson.encode(val, date_unit="s"))
- assert roundtrip == stamp.value // 10**9
+ assert roundtrip == stamp._value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit="ms"))
- assert roundtrip == stamp.value // 10**6
+ assert roundtrip == stamp._value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit="us"))
- assert roundtrip == stamp.value // 10**3
+ assert roundtrip == stamp._value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit="ns"))
- assert roundtrip == stamp.value
+ assert roundtrip == stamp._value
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/io/pytables/test_timezones.py b/pandas/tests/io/pytables/test_timezones.py
index ba125ffd28581..058eaa9d0529d 100644
--- a/pandas/tests/io/pytables/test_timezones.py
+++ b/pandas/tests/io/pytables/test_timezones.py
@@ -159,7 +159,7 @@ def test_roundtrip_tz_aware_index(setup_path):
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
- assert recons.index[0].value == 946706400000000000
+ assert recons.index[0]._value == 946706400000000000
def test_store_index_name_with_tz(setup_path):
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index e0ae3da482b35..88e62ea0bc79a 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -63,7 +63,7 @@ def test_ops(self, opname, obj):
if getattr(obj, "tz", None) is not None:
# We need to de-localize before comparing to the numpy-produced result
expected = expected.astype("M8[ns]").astype("int64")
- assert result.value == expected
+ assert result._value == expected
else:
assert result == expected
diff --git a/pandas/tests/scalar/period/test_asfreq.py b/pandas/tests/scalar/period/test_asfreq.py
index 386ab4150c6ff..e652c63d46f18 100644
--- a/pandas/tests/scalar/period/test_asfreq.py
+++ b/pandas/tests/scalar/period/test_asfreq.py
@@ -668,7 +668,7 @@ def test_conv_microsecond(self):
start = per.start_time
expected = Timestamp("2020-01-30 15:57:27.576166")
assert start == expected
- assert start.value == per.ordinal * 1000
+ assert start._value == per.ordinal * 1000
per2 = Period("2300-01-01", "us")
msg = "2300-01-01"
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index bb9a7dd9374b5..88b1bda6b645c 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -627,7 +627,7 @@ def test_to_timestamp(self):
def _ex(p):
if p.freq == "B":
return p.start_time + Timedelta(days=1, nanoseconds=-1)
- return Timestamp((p + p.freq).start_time.value - 1)
+ return Timestamp((p + p.freq).start_time._value - 1)
for fcode in from_lst:
p = Period("1982", freq=fcode)
@@ -839,7 +839,7 @@ def test_end_time(self):
p = Period("2012", freq="A")
def _ex(*args):
- return Timestamp(Timestamp(datetime(*args)).as_unit("ns").value - 1)
+ return Timestamp(Timestamp(datetime(*args)).as_unit("ns")._value - 1)
xp = _ex(2013, 1, 1)
assert xp == p.end_time
@@ -891,7 +891,7 @@ def test_end_time_business_friday(self):
def test_anchor_week_end_time(self):
def _ex(*args):
- return Timestamp(Timestamp(datetime(*args)).as_unit("ns").value - 1)
+ return Timestamp(Timestamp(datetime(*args)).as_unit("ns")._value - 1)
p = Period("2013-1-1", "W-SAT")
xp = _ex(2013, 1, 6)
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index e310506935729..3192baacb4ef4 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -110,7 +110,7 @@ def test_equality(klass, value, request):
pytest.mark.xfail(reason="Period cannot parse empty string")
)
- assert klass(value).value == iNaT
+ assert klass(value)._value == iNaT
@pytest.mark.parametrize("klass", [Timestamp, Timedelta])
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index cbb53ed2b8e36..01ec11c306dcd 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -393,8 +393,8 @@ def test_td_mul_scalar(self, op):
assert op(td, np.nan) is NaT
- assert op(-1, td).value == -1 * td.value
- assert op(-1.0, td).value == -1.0 * td.value
+ assert op(-1, td)._value == -1 * td._value
+ assert op(-1.0, td)._value == -1.0 * td._value
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
@@ -463,11 +463,11 @@ def test_td_div_td64_non_nano(self):
# truediv
td = Timedelta("1 days 2 hours 3 ns")
result = td / np.timedelta64(1, "D")
- assert result == td.value / (86400 * 10**9)
+ assert result == td._value / (86400 * 10**9)
result = td / np.timedelta64(1, "s")
- assert result == td.value / 10**9
+ assert result == td._value / 10**9
result = td / np.timedelta64(1, "ns")
- assert result == td.value
+ assert result == td._value
# floordiv
td = Timedelta("1 days 2 hours 3 ns")
@@ -476,7 +476,7 @@ def test_td_div_td64_non_nano(self):
result = td // np.timedelta64(1, "s")
assert result == 93600
result = td // np.timedelta64(1, "ns")
- assert result == td.value
+ assert result == td._value
def test_td_div_numeric_scalar(self):
# GH#19738
diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py
index e4120478370d1..ad9dd408fbeaf 100644
--- a/pandas/tests/scalar/timedelta/test_constructors.py
+++ b/pandas/tests/scalar/timedelta/test_constructors.py
@@ -44,7 +44,7 @@ def test_from_td64_retain_resolution():
obj = np.timedelta64(12345, "ms")
td = Timedelta(obj)
- assert td.value == obj.view("i8")
+ assert td._value == obj.view("i8")
assert td._creso == NpyDatetimeUnit.NPY_FR_ms.value
# Case where we cast to nearest-supported reso
@@ -95,22 +95,22 @@ def test_from_tick_reso():
def test_construction():
expected = np.timedelta64(10, "D").astype("m8[ns]").view("i8")
- assert Timedelta(10, unit="d").value == expected
- assert Timedelta(10.0, unit="d").value == expected
- assert Timedelta("10 days").value == expected
- assert Timedelta(days=10).value == expected
- assert Timedelta(days=10.0).value == expected
+ assert Timedelta(10, unit="d")._value == expected
+ assert Timedelta(10.0, unit="d")._value == expected
+ assert Timedelta("10 days")._value == expected
+ assert Timedelta(days=10)._value == expected
+ assert Timedelta(days=10.0)._value == expected
expected += np.timedelta64(10, "s").astype("m8[ns]").view("i8")
- assert Timedelta("10 days 00:00:10").value == expected
- assert Timedelta(days=10, seconds=10).value == expected
- assert Timedelta(days=10, milliseconds=10 * 1000).value == expected
- assert Timedelta(days=10, microseconds=10 * 1000 * 1000).value == expected
+ assert Timedelta("10 days 00:00:10")._value == expected
+ assert Timedelta(days=10, seconds=10)._value == expected
+ assert Timedelta(days=10, milliseconds=10 * 1000)._value == expected
+ assert Timedelta(days=10, microseconds=10 * 1000 * 1000)._value == expected
# rounding cases
- assert Timedelta(82739999850000).value == 82739999850000
+ assert Timedelta(82739999850000)._value == 82739999850000
assert "0 days 22:58:59.999850" in str(Timedelta(82739999850000))
- assert Timedelta(123072001000000).value == 123072001000000
+ assert Timedelta(123072001000000)._value == 123072001000000
assert "1 days 10:11:12.001" in str(Timedelta(123072001000000))
# string conversion with/without leading zero
@@ -200,7 +200,7 @@ def test_construction():
expected = np.timedelta64(10, "s").astype("m8[ns]").view("i8") + np.timedelta64(
500, "ms"
).astype("m8[ns]").view("i8")
- assert Timedelta(10.5, unit="s").value == expected
+ assert Timedelta(10.5, unit="s")._value == expected
# offset
assert to_timedelta(offsets.Hour(2)) == Timedelta(hours=2)
@@ -239,7 +239,7 @@ def test_td_construction_with_np_dtypes(npdtype, item):
# GH#8757: test construction with np dtypes
pykwarg, npkwarg = item
expected = np.timedelta64(1, npkwarg).astype("m8[ns]").view("i8")
- assert Timedelta(**{pykwarg: npdtype(1)}).value == expected
+ assert Timedelta(**{pykwarg: npdtype(1)})._value == expected
@pytest.mark.parametrize(
@@ -261,7 +261,7 @@ def test_td_construction_with_np_dtypes(npdtype, item):
def test_td_from_repr_roundtrip(val):
# round-trip both for string and value
td = Timedelta(val)
- assert Timedelta(td.value) == td
+ assert Timedelta(td._value) == td
assert Timedelta(str(td)) == td
assert Timedelta(td._repr_base(format="all")) == td
@@ -270,7 +270,7 @@ def test_td_from_repr_roundtrip(val):
def test_overflow_on_construction():
# GH#3374
- value = Timedelta("1day").value * 20169940
+ value = Timedelta("1day")._value * 20169940
msg = "Cannot cast 1742682816000000000000 from ns to 'ns' without overflow"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
Timedelta(value)
@@ -512,3 +512,23 @@ class MyCustomTimedelta(Timedelta):
td = MyCustomTimedelta("1 minute")
assert isinstance(td, MyCustomTimedelta)
+
+
+def test_non_nano_value():
+ # https://github.com/pandas-dev/pandas/issues/49076
+ result = Timedelta(10, unit="D").as_unit("s").value
+ # `.value` shows nanoseconds, even though unit is 's'
+ assert result == 864000000000000
+
+ # out-of-nanoseconds-bounds `.value` raises informative message
+ msg = (
+ r"Cannot convert Timedelta to nanoseconds without overflow. "
+ r"Use `.asm8.view\('i8'\)` to cast represent Timedelta in its "
+ r"own unit \(here, s\).$"
+ )
+ td = Timedelta(1_000, "D").as_unit("s") * 1_000
+ with pytest.raises(OverflowError, match=msg):
+ td.value
+ # check that the suggested workaround actually works
+ result = td.asm8.view("i8")
+ assert result == 86400000000
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 924f756edb233..dc4399a4a38de 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -32,27 +32,27 @@ def test_as_unit(self):
assert td.as_unit("ns") is td
res = td.as_unit("us")
- assert res.value == td.value // 1000
+ assert res._value == td._value // 1000
assert res._creso == NpyDatetimeUnit.NPY_FR_us.value
rt = res.as_unit("ns")
- assert rt.value == td.value
+ assert rt._value == td._value
assert rt._creso == td._creso
res = td.as_unit("ms")
- assert res.value == td.value // 1_000_000
+ assert res._value == td._value // 1_000_000
assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value
rt = res.as_unit("ns")
- assert rt.value == td.value
+ assert rt._value == td._value
assert rt._creso == td._creso
res = td.as_unit("s")
- assert res.value == td.value // 1_000_000_000
+ assert res._value == td._value // 1_000_000_000
assert res._creso == NpyDatetimeUnit.NPY_FR_s.value
rt = res.as_unit("ns")
- assert rt.value == td.value
+ assert rt._value == td._value
assert rt._creso == td._creso
def test_as_unit_overflows(self):
@@ -65,7 +65,7 @@ def test_as_unit_overflows(self):
td.as_unit("ns")
res = td.as_unit("ms")
- assert res.value == us // 1000
+ assert res._value == us // 1000
assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value
def test_as_unit_rounding(self):
@@ -76,7 +76,7 @@ def test_as_unit_rounding(self):
assert res == expected
assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value
- assert res.value == 1
+ assert res._value == 1
with pytest.raises(ValueError, match="Cannot losslessly convert units"):
td.as_unit("ms", round_ok=False)
@@ -85,13 +85,13 @@ def test_as_unit_non_nano(self):
# case where we are going neither to nor from nano
td = Timedelta(days=1).as_unit("ms")
assert td.days == 1
- assert td.value == 86_400_000
+ assert td._value == 86_400_000
assert td.components.days == 1
assert td._d == 1
assert td.total_seconds() == 86400
res = td.as_unit("us")
- assert res.value == 86_400_000_000
+ assert res._value == 86_400_000_000
assert res.components.days == 1
assert res.components.hours == 0
assert res._d == 1
@@ -129,7 +129,7 @@ def td(self, unit, val):
def test_from_value_and_reso(self, unit, val):
# Just checking that the fixture is giving us what we asked for
td = Timedelta._from_value_and_reso(val, unit)
- assert td.value == val
+ assert td._value == val
assert td._creso == unit
assert td.days == 106752
@@ -148,7 +148,7 @@ def test_mul_preserves_reso(self, td, unit):
# The td fixture should always be far from the implementation
# bound, so doubling does not risk overflow.
res = td * 2
- assert res.value == td.value * 2
+ assert res._value == td._value * 2
assert res._creso == unit
def test_cmp_cross_reso(self, td):
@@ -169,7 +169,7 @@ def test_to_timedelta64(self, td, unit):
for res in [td.to_timedelta64(), td.to_numpy(), td.asm8]:
assert isinstance(res, np.timedelta64)
- assert res.view("i8") == td.value
+ assert res.view("i8") == td._value
if unit == NpyDatetimeUnit.NPY_FR_s.value:
assert res.dtype == "m8[s]"
elif unit == NpyDatetimeUnit.NPY_FR_ms.value:
@@ -181,7 +181,7 @@ def test_truediv_timedeltalike(self, td):
assert td / td == 1
assert (2.5 * td) / td == 2.5
- other = Timedelta(td.value)
+ other = Timedelta(td._value)
msg = "Cannot cast 106752 days 00:00:00 to unit='ns' without overflow."
with pytest.raises(OutOfBoundsTimedelta, match=msg):
td / other
@@ -206,18 +206,18 @@ def test_truediv_numeric(self, td):
assert td / np.nan is NaT
res = td / 2
- assert res.value == td.value / 2
+ assert res._value == td._value / 2
assert res._creso == td._creso
res = td / 2.0
- assert res.value == td.value / 2
+ assert res._value == td._value / 2
assert res._creso == td._creso
def test_floordiv_timedeltalike(self, td):
assert td // td == 1
assert (2.5 * td) // td == 2
- other = Timedelta(td.value)
+ other = Timedelta(td._value)
msg = "Cannot cast 106752 days 00:00:00 to unit='ns' without overflow"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
td // other
@@ -240,21 +240,21 @@ def test_floordiv_numeric(self, td):
assert td // np.nan is NaT
res = td // 2
- assert res.value == td.value // 2
+ assert res._value == td._value // 2
assert res._creso == td._creso
res = td // 2.0
- assert res.value == td.value // 2
+ assert res._value == td._value // 2
assert res._creso == td._creso
assert td // np.array(np.nan) is NaT
res = td // np.array(2)
- assert res.value == td.value // 2
+ assert res._value == td._value // 2
assert res._creso == td._creso
res = td // np.array(2.0)
- assert res.value == td.value // 2
+ assert res._value == td._value // 2
assert res._creso == td._creso
def test_addsub_mismatched_reso(self, td):
@@ -293,12 +293,12 @@ def test_addsub_mismatched_reso(self, td):
def test_min(self, td):
assert td.min <= td
assert td.min._creso == td._creso
- assert td.min.value == NaT.value + 1
+ assert td.min._value == NaT._value + 1
def test_max(self, td):
assert td.max >= td
assert td.max._creso == td._creso
- assert td.max.value == np.iinfo(np.int64).max
+ assert td.max._value == np.iinfo(np.int64).max
def test_resolution(self, td):
expected = Timedelta._from_value_and_reso(1, td._creso)
@@ -310,7 +310,7 @@ def test_resolution(self, td):
def test_timedelta_class_min_max_resolution():
# when accessed on the class (as opposed to an instance), we default
# to nanoseconds
- assert Timedelta.min == Timedelta(NaT.value + 1)
+ assert Timedelta.min == Timedelta(NaT._value + 1)
assert Timedelta.min._creso == NpyDatetimeUnit.NPY_FR_ns.value
assert Timedelta.max == Timedelta(np.iinfo(np.int64).max)
@@ -362,9 +362,9 @@ class TestTimedeltas:
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
- assert result.value == expected
+ assert result._value == expected
result = Timedelta(str(value) + unit)
- assert result.value == expected
+ assert result._value == expected
def test_total_seconds_scalar(self):
# see gh-10939
@@ -383,10 +383,10 @@ def test_conversion(self):
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
- assert td == np.timedelta64(td.value, "ns")
+ assert td == np.timedelta64(td._value, "ns")
td64 = td.to_timedelta64()
- assert td64 == np.timedelta64(td.value, "ns")
+ assert td64 == np.timedelta64(td._value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
@@ -425,8 +425,8 @@ def check(value):
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
- assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
- assert Timedelta("-1 days, 10:11:12").value == -49728000000000
+ assert -Timedelta("-1 days, 10:11:12")._value == 49728000000000
+ assert Timedelta("-1 days, 10:11:12")._value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
@@ -882,8 +882,8 @@ def test_implementation_limits(self):
# GH 12727
# timedelta limits correspond to int64 boundaries
- assert min_td.value == iNaT + 1
- assert max_td.value == lib.i8max
+ assert min_td._value == iNaT + 1
+ assert max_td._value == lib.i8max
# Beyond lower limit, a NAT before the Overflow
assert (min_td - Timedelta(1, "ns")) is NaT
@@ -896,16 +896,16 @@ def test_implementation_limits(self):
max_td + Timedelta(1, "ns")
# Same tests using the internal nanosecond values
- td = Timedelta(min_td.value - 1, "ns")
+ td = Timedelta(min_td._value - 1, "ns")
assert td is NaT
msg = "Cannot cast -9223372036854775809 from ns to 'ns' without overflow"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
- Timedelta(min_td.value - 2, "ns")
+ Timedelta(min_td._value - 2, "ns")
msg = "Cannot cast 9223372036854775808 from ns to 'ns' without overflow"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
- Timedelta(max_td.value + 1, "ns")
+ Timedelta(max_td._value + 1, "ns")
def test_total_seconds_precision(self):
# GH 19458
@@ -963,5 +963,5 @@ def test_timedelta_attribute_precision():
result += td.microseconds
result *= 1000
result += td.nanoseconds
- expected = td.value
+ expected = td._value
assert result == expected
diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py
index 0ad3436e55787..2dac346bc54d5 100644
--- a/pandas/tests/scalar/timestamp/test_arithmetic.py
+++ b/pandas/tests/scalar/timestamp/test_arithmetic.py
@@ -200,7 +200,7 @@ def test_timestamp_add_timedelta64_unit(self, other, expected_difference):
now = datetime.utcnow()
ts = Timestamp(now).as_unit("ns")
result = ts + other
- valdiff = result.value - ts.value
+ valdiff = result._value - ts._value
assert valdiff == expected_difference
ts2 = Timestamp(now)
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index 8129985ef9bea..5ea8fc53a6bab 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -160,11 +160,11 @@ def test_constructor(self):
for result in [Timestamp(date_str), Timestamp(date_obj)]:
result = result.as_unit("ns") # test originally written before non-nano
# only with timestring
- assert result.as_unit("ns").value == expected
+ assert result.as_unit("ns")._value == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
- assert result.as_unit("ns").value == expected
+ assert result.as_unit("ns")._value == expected
# with timezone
for tz, offset in timezones:
@@ -173,11 +173,11 @@ def test_constructor(self):
"ns"
) # test originally written before non-nano
expected_tz = expected - offset * 3600 * 1_000_000_000
- assert result.as_unit("ns").value == expected_tz
+ assert result.as_unit("ns")._value == expected_tz
# should preserve tz
result = Timestamp(result)
- assert result.as_unit("ns").value == expected_tz
+ assert result.as_unit("ns")._value == expected_tz
# should convert to UTC
if tz is not None:
@@ -185,7 +185,7 @@ def test_constructor(self):
else:
result = Timestamp(result, tz="UTC")
expected_utc = expected - offset * 3600 * 1_000_000_000
- assert result.as_unit("ns").value == expected_utc
+ assert result.as_unit("ns")._value == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
@@ -217,31 +217,31 @@ def test_constructor_with_stringoffset(self):
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
- assert result.as_unit("ns").value == expected
+ assert result.as_unit("ns")._value == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
- assert result.as_unit("ns").value == expected
+ assert result.as_unit("ns")._value == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
- assert result.as_unit("ns").value == expected_tz
+ assert result.as_unit("ns")._value == expected_tz
# should preserve tz
result = Timestamp(result)
- assert result.as_unit("ns").value == expected_tz
+ assert result.as_unit("ns")._value == expected_tz
# should convert to UTC
result = Timestamp(result).tz_convert("UTC")
expected_utc = expected
- assert result.as_unit("ns").value == expected_utc
+ assert result.as_unit("ns")._value == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp("2013-11-01 00:00:00-0500", tz="America/Chicago")
- assert result.value == Timestamp("2013-11-01 05:00").value
+ assert result._value == Timestamp("2013-11-01 05:00")._value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')"
assert repr(result) == expected
assert result == eval(repr(result))
@@ -249,7 +249,7 @@ def test_constructor_with_stringoffset(self):
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp("2013-11-01 00:00:00-0500", tz="Asia/Tokyo")
- assert result.value == Timestamp("2013-11-01 05:00").value
+ assert result._value == Timestamp("2013-11-01 05:00")._value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
@@ -258,7 +258,7 @@ def test_constructor_with_stringoffset(self):
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
- assert result.value == Timestamp("2015-11-18 10:00").value
+ assert result._value == Timestamp("2015-11-18 10:00")._value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
@@ -266,7 +266,7 @@ def test_constructor_with_stringoffset(self):
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
- assert result.value == Timestamp("2015-11-18 10:00").value
+ assert result._value == Timestamp("2015-11-18 10:00")._value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
@@ -491,12 +491,12 @@ def test_invalid_date_kwarg_with_string_input(self, arg):
def test_out_of_bounds_integer_value(self):
# GH#26651 check that we raise OutOfBoundsDatetime, not OverflowError
- msg = str(Timestamp.max.value * 2)
+ msg = str(Timestamp.max._value * 2)
with pytest.raises(OutOfBoundsDatetime, match=msg):
- Timestamp(Timestamp.max.value * 2)
- msg = str(Timestamp.min.value * 2)
+ Timestamp(Timestamp.max._value * 2)
+ msg = str(Timestamp.min._value * 2)
with pytest.raises(OutOfBoundsDatetime, match=msg):
- Timestamp(Timestamp.min.value * 2)
+ Timestamp(Timestamp.min._value * 2)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).astype("timedelta64[us]")
@@ -563,7 +563,7 @@ def test_bounds_with_different_units(self):
ts = Timestamp(dt64)
if unit in ["s", "ms", "us"]:
# We can preserve the input unit
- assert ts.value == dt64.view("i8")
+ assert ts._value == dt64.view("i8")
else:
# we chose the closest unit that we _do_ support
assert ts._creso == NpyDatetimeUnit.NPY_FR_s.value
@@ -724,8 +724,8 @@ def test_constructor_ambigous_dst():
# on Timestamp created from ambiguous time
# doesn't change Timestamp.value
ts = Timestamp(1382835600000000000, tz="dateutil/Europe/London")
- expected = ts.value
- result = Timestamp(ts).value
+ expected = ts._value
+ result = Timestamp(ts)._value
assert result == expected
@@ -738,7 +738,7 @@ def test_constructor_before_dst_switch(epoch):
ts = Timestamp(epoch, tz="dateutil/America/Los_Angeles")
result = ts.tz.dst(ts)
expected = timedelta(seconds=0)
- assert Timestamp(ts).value == epoch
+ assert Timestamp(ts)._value == epoch
assert result == expected
@@ -767,3 +767,23 @@ def test_timestamp_nano_range(nano):
# GH 48255
with pytest.raises(ValueError, match="nanosecond must be in 0..999"):
Timestamp(year=2022, month=1, day=1, nanosecond=nano)
+
+
+def test_non_nano_value():
+ # https://github.com/pandas-dev/pandas/issues/49076
+ result = Timestamp("1800-01-01", unit="s").value
+ # `.value` shows nanoseconds, even though unit is 's'
+ assert result == -5364662400000000000
+
+ # out-of-nanoseconds-bounds `.value` raises informative message
+ msg = (
+ r"Cannot convert Timestamp to nanoseconds without overflow. "
+ r"Use `.asm8.view\('i8'\)` to cast represent Timestamp in its "
+ r"own unit \(here, s\).$"
+ )
+ ts = Timestamp("0300-01-01")
+ with pytest.raises(OverflowError, match=msg):
+ ts.value
+ # check that the suggested workaround actually works
+ result = ts.asm8.view("i8")
+ assert result == -52700112000
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 22f5286569c6e..968ec122cde20 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -245,7 +245,7 @@ def test_utc_z_designator(self):
def test_asm8(self):
np.random.seed(7_960_929)
- ns = [Timestamp.min.value, Timestamp.max.value, 1000]
+ ns = [Timestamp.min._value, Timestamp.max._value, 1000]
for n in ns:
assert (
@@ -256,7 +256,7 @@ def test_asm8(self):
def test_class_ops_pytz(self):
def compare(x, y):
- assert int((Timestamp(x).value - Timestamp(y).value) / 1e9) == 0
+ assert int((Timestamp(x)._value - Timestamp(y)._value) / 1e9) == 0
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now("UTC"), datetime.now(pytz.timezone("UTC")))
@@ -291,8 +291,8 @@ def test_class_ops_dateutil(self):
def compare(x, y):
assert (
int(
- np.round(Timestamp(x).value / 1e9)
- - np.round(Timestamp(y).value / 1e9)
+ np.round(Timestamp(x)._value / 1e9)
+ - np.round(Timestamp(y)._value / 1e9)
)
== 0
)
@@ -386,24 +386,24 @@ def test_roundtrip(self):
# further test accessors
base = Timestamp("20140101 00:00:00").as_unit("ns")
- result = Timestamp(base.value + Timedelta("5ms").value)
+ result = Timestamp(base._value + Timedelta("5ms")._value)
assert result == Timestamp(f"{base}.005000")
assert result.microsecond == 5000
- result = Timestamp(base.value + Timedelta("5us").value)
+ result = Timestamp(base._value + Timedelta("5us")._value)
assert result == Timestamp(f"{base}.000005")
assert result.microsecond == 5
- result = Timestamp(base.value + Timedelta("5ns").value)
+ result = Timestamp(base._value + Timedelta("5ns")._value)
assert result == Timestamp(f"{base}.000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
- result = Timestamp(base.value + Timedelta("6ms 5us").value)
+ result = Timestamp(base._value + Timedelta("6ms 5us")._value)
assert result == Timestamp(f"{base}.006005")
assert result.microsecond == 5 + 6 * 1000
- result = Timestamp(base.value + Timedelta("200ms 5us").value)
+ result = Timestamp(base._value + Timedelta("200ms 5us")._value)
assert result == Timestamp(f"{base}.200005")
assert result.microsecond == 5 + 200 * 1000
@@ -446,24 +446,24 @@ def test_nanosecond_string_parsing(self):
# GH 7878
expected_repr = "2013-05-01 07:15:45.123456789"
expected_value = 1_367_392_545_123_456_789
- assert ts.value == expected_value
+ assert ts._value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789+09:00", tz="Asia/Tokyo")
- assert ts.value == expected_value - 9 * 3600 * 1_000_000_000
+ assert ts._value == expected_value - 9 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="UTC")
- assert ts.value == expected_value
+ assert ts._value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="US/Eastern")
- assert ts.value == expected_value + 4 * 3600 * 1_000_000_000
+ assert ts._value == expected_value + 4 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
# GH 10041
ts = Timestamp("20130501T071545.123456789")
- assert ts.value == expected_value
+ assert ts._value == expected_value
assert expected_repr in repr(ts)
def test_nanosecond_timestamp(self):
@@ -471,33 +471,33 @@ def test_nanosecond_timestamp(self):
expected = 1_293_840_000_000_000_005
t = Timestamp("2011-01-01") + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
- assert t.value == expected
+ assert t._value == expected
assert t.nanosecond == 5
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
- assert t.value == expected
+ assert t._value == expected
assert t.nanosecond == 5
t = Timestamp("2011-01-01 00:00:00.000000005")
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
- assert t.value == expected
+ assert t._value == expected
assert t.nanosecond == 5
expected = 1_293_840_000_000_000_010
t = t + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
- assert t.value == expected
+ assert t._value == expected
assert t.nanosecond == 10
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
- assert t.value == expected
+ assert t._value == expected
assert t.nanosecond == 10
t = Timestamp("2011-01-01 00:00:00.000000010")
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
- assert t.value == expected
+ assert t._value == expected
assert t.nanosecond == 10
@@ -534,7 +534,7 @@ def test_conversion(self):
assert type(result) == type(expected)
result = ts.to_datetime64()
- expected = np.datetime64(ts.value, "ns")
+ expected = np.datetime64(ts._value, "ns")
assert result == expected
assert type(result) == type(expected)
assert result.dtype == expected.dtype
@@ -588,7 +588,8 @@ def test_to_datetime_bijective(self):
pydt_max = Timestamp.max.to_pydatetime()
assert (
- Timestamp(pydt_max).as_unit("ns").value / 1000 == Timestamp.max.value / 1000
+ Timestamp(pydt_max).as_unit("ns")._value / 1000
+ == Timestamp.max._value / 1000
)
exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
@@ -601,8 +602,8 @@ def test_to_datetime_bijective(self):
assert pydt_min + tdus > Timestamp.min
assert (
- Timestamp(pydt_min + tdus).as_unit("ns").value / 1000
- == Timestamp.min.value / 1000
+ Timestamp(pydt_min + tdus).as_unit("ns")._value / 1000
+ == Timestamp.min._value / 1000
)
def test_to_period_tz_warning(self):
@@ -664,10 +665,10 @@ def ts(self, dt64):
@pytest.fixture
def ts_tz(self, ts, tz_aware_fixture):
tz = maybe_get_tz(tz_aware_fixture)
- return Timestamp._from_value_and_reso(ts.value, ts._creso, tz)
+ return Timestamp._from_value_and_reso(ts._value, ts._creso, tz)
def test_non_nano_construction(self, dt64, ts, reso):
- assert ts.value == dt64.view("i8")
+ assert ts._value == dt64.view("i8")
if reso == "s":
assert ts._creso == NpyDatetimeUnit.NPY_FR_s.value
@@ -714,7 +715,7 @@ def test_month_name(self, dt64, ts):
assert ts.month_name() == alt.month_name()
def test_tz_convert(self, ts):
- ts = Timestamp._from_value_and_reso(ts.value, ts._creso, utc)
+ ts = Timestamp._from_value_and_reso(ts._value, ts._creso, utc)
tz = pytz.timezone("US/Pacific")
result = ts.tz_convert(tz)
@@ -788,7 +789,7 @@ def test_cmp_cross_reso_reversed_dt64(self):
def test_pickle(self, ts, tz_aware_fixture):
tz = tz_aware_fixture
tz = maybe_get_tz(tz)
- ts = Timestamp._from_value_and_reso(ts.value, ts._creso, tz)
+ ts = Timestamp._from_value_and_reso(ts._value, ts._creso, tz)
rt = tm.round_trip_pickle(ts)
assert rt._creso == ts._creso
assert rt == ts
@@ -888,12 +889,12 @@ def test_sub_datetimelike_mismatched_reso(self, ts_tz):
result = ts - other
assert isinstance(result, Timedelta)
- assert result.value == 0
+ assert result._value == 0
assert result._creso == max(ts._creso, other._creso)
result = other - ts
assert isinstance(result, Timedelta)
- assert result.value == 0
+ assert result._value == 0
assert result._creso == max(ts._creso, other._creso)
if ts._creso < other._creso:
@@ -983,12 +984,12 @@ def test_sub_timedelta64_mismatched_reso(self, ts_tz):
def test_min(self, ts):
assert ts.min <= ts
assert ts.min._creso == ts._creso
- assert ts.min.value == NaT.value + 1
+ assert ts.min._value == NaT._value + 1
def test_max(self, ts):
assert ts.max >= ts
assert ts.max._creso == ts._creso
- assert ts.max.value == np.iinfo(np.int64).max
+ assert ts.max._value == np.iinfo(np.int64).max
def test_resolution(self, ts):
expected = Timedelta._from_value_and_reso(1, ts._creso)
@@ -1000,7 +1001,7 @@ def test_resolution(self, ts):
def test_timestamp_class_min_max_resolution():
# when accessed on the class (as opposed to an instance), we default
# to nanoseconds
- assert Timestamp.min == Timestamp(NaT.value + 1)
+ assert Timestamp.min == Timestamp(NaT._value + 1)
assert Timestamp.min._creso == NpyDatetimeUnit.NPY_FR_ns.value
assert Timestamp.max == Timestamp(np.iinfo(np.int64).max)
@@ -1018,27 +1019,27 @@ def test_as_unit(self):
assert ts.as_unit("ns") is ts
res = ts.as_unit("us")
- assert res.value == ts.value // 1000
+ assert res._value == ts._value // 1000
assert res._creso == NpyDatetimeUnit.NPY_FR_us.value
rt = res.as_unit("ns")
- assert rt.value == ts.value
+ assert rt._value == ts._value
assert rt._creso == ts._creso
res = ts.as_unit("ms")
- assert res.value == ts.value // 1_000_000
+ assert res._value == ts._value // 1_000_000
assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value
rt = res.as_unit("ns")
- assert rt.value == ts.value
+ assert rt._value == ts._value
assert rt._creso == ts._creso
res = ts.as_unit("s")
- assert res.value == ts.value // 1_000_000_000
+ assert res._value == ts._value // 1_000_000_000
assert res._creso == NpyDatetimeUnit.NPY_FR_s.value
rt = res.as_unit("ns")
- assert rt.value == ts.value
+ assert rt._value == ts._value
assert rt._creso == ts._creso
def test_as_unit_overflows(self):
@@ -1051,7 +1052,7 @@ def test_as_unit_overflows(self):
ts.as_unit("ns")
res = ts.as_unit("ms")
- assert res.value == us // 1000
+ assert res._value == us // 1000
assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value
def test_as_unit_rounding(self):
@@ -1062,7 +1063,7 @@ def test_as_unit_rounding(self):
assert res == expected
assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value
- assert res.value == 1
+ assert res._value == 1
with pytest.raises(ValueError, match="Cannot losslessly convert units"):
ts.as_unit("ms", round_ok=False)
@@ -1076,7 +1077,7 @@ def test_as_unit_non_nano(self):
assert ts.hour == ts.minute == ts.second == ts.microsecond == ts.nanosecond == 0
res = ts.as_unit("s")
- assert res.value == 24 * 3600
+ assert res._value == 24 * 3600
assert res.year == 1970
assert res.month == 1
assert res.day == 2
diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py
index e2df4d23bd858..820b2e17a9d3f 100644
--- a/pandas/tests/scalar/timestamp/test_timezones.py
+++ b/pandas/tests/scalar/timestamp/test_timezones.py
@@ -49,7 +49,7 @@ def test_tz_localize_pushes_out_of_bounds(self):
f"underflows past {Timestamp.min}"
)
pac = Timestamp.min.tz_localize("US/Pacific")
- assert pac.value > Timestamp.min.value
+ assert pac._value > Timestamp.min._value
pac.tz_convert("Asia/Tokyo") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.min.tz_localize("Asia/Tokyo")
@@ -60,7 +60,7 @@ def test_tz_localize_pushes_out_of_bounds(self):
f"overflows past {Timestamp.max}"
)
tokyo = Timestamp.max.tz_localize("Asia/Tokyo")
- assert tokyo.value < Timestamp.max.value
+ assert tokyo._value < Timestamp.max._value
tokyo.tz_convert("US/Pacific") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.max.tz_localize("US/Pacific")
@@ -103,7 +103,7 @@ def test_tz_localize_ambiguous(self):
ts_dst = ts.tz_localize("US/Eastern", ambiguous=True)
ts_no_dst = ts.tz_localize("US/Eastern", ambiguous=False)
- assert ts_no_dst.value - ts_dst.value == 3600
+ assert ts_no_dst._value - ts_dst._value == 3600
msg = re.escape(
"'ambiguous' parameter must be one of: "
"True, False, 'NaT', 'raise' (default)"
@@ -189,8 +189,8 @@ def test_tz_localize_ambiguous_compat(self):
dateutil_zone = "dateutil/Europe/London"
result_pytz = naive.tz_localize(pytz_zone, ambiguous=False)
result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=False)
- assert result_pytz.value == result_dateutil.value
- assert result_pytz.value == 1382835600
+ assert result_pytz._value == result_dateutil._value
+ assert result_pytz._value == 1382835600
# fixed ambiguous behavior
# see gh-14621, GH#45087
@@ -201,8 +201,8 @@ def test_tz_localize_ambiguous_compat(self):
# 1 hour difference
result_pytz = naive.tz_localize(pytz_zone, ambiguous=True)
result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=True)
- assert result_pytz.value == result_dateutil.value
- assert result_pytz.value == 1382832000
+ assert result_pytz._value == result_dateutil._value
+ assert result_pytz._value == 1382832000
# see gh-14621
assert str(result_pytz) == str(result_dateutil)
@@ -431,7 +431,7 @@ def test_timestamp_constructor_near_dst_boundary(self):
Timestamp("2017-03-26 02:00", tz="Europe/Paris")
result = Timestamp("2017-03-26 02:00:00+0100", tz="Europe/Paris")
- naive = Timestamp(result.as_unit("ns").value)
+ naive = Timestamp(result.as_unit("ns")._value)
expected = naive.tz_localize("UTC").tz_convert("Europe/Paris")
assert result == expected
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index f87922336b714..62bfede7f4261 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -268,21 +268,21 @@ def test_round_int64(self, timestamp, freq):
# test floor
result = dt.floor(freq)
- assert result.value % unit == 0, f"floor not a {freq} multiple"
- assert 0 <= dt.value - result.value < unit, "floor error"
+ assert result._value % unit == 0, f"floor not a {freq} multiple"
+ assert 0 <= dt._value - result._value < unit, "floor error"
# test ceil
result = dt.ceil(freq)
- assert result.value % unit == 0, f"ceil not a {freq} multiple"
- assert 0 <= result.value - dt.value < unit, "ceil error"
+ assert result._value % unit == 0, f"ceil not a {freq} multiple"
+ assert 0 <= result._value - dt._value < unit, "ceil error"
# test round
result = dt.round(freq)
- assert result.value % unit == 0, f"round not a {freq} multiple"
- assert abs(result.value - dt.value) <= unit // 2, "round error"
- if unit % 2 == 0 and abs(result.value - dt.value) == unit // 2:
+ assert result._value % unit == 0, f"round not a {freq} multiple"
+ assert abs(result._value - dt._value) <= unit // 2, "round error"
+ if unit % 2 == 0 and abs(result._value - dt._value) == unit // 2:
# round half to even
- assert result.value // unit % 2 == 0, "round half to even error"
+ assert result._value // unit % 2 == 0, "round half to even error"
def test_round_implementation_bounds(self):
# See also: analogous test for Timedelta
@@ -315,7 +315,7 @@ def test_round_sanity(self, val, method):
def checker(res, ts, nanos):
if method is Timestamp.round:
- diff = np.abs((res - ts).value)
+ diff = np.abs((res - ts)._value)
assert diff <= nanos / 2
elif method is Timestamp.floor:
assert res <= ts
@@ -326,38 +326,38 @@ def checker(res, ts, nanos):
res = method(ts, "us")
nanos = 1000
- assert np.abs((res - ts).value) < nanos
- assert res.value % nanos == 0
+ assert np.abs((res - ts)._value) < nanos
+ assert res._value % nanos == 0
checker(res, ts, nanos)
res = method(ts, "ms")
nanos = 1_000_000
- assert np.abs((res - ts).value) < nanos
- assert res.value % nanos == 0
+ assert np.abs((res - ts)._value) < nanos
+ assert res._value % nanos == 0
checker(res, ts, nanos)
res = method(ts, "s")
nanos = 1_000_000_000
- assert np.abs((res - ts).value) < nanos
- assert res.value % nanos == 0
+ assert np.abs((res - ts)._value) < nanos
+ assert res._value % nanos == 0
checker(res, ts, nanos)
res = method(ts, "min")
nanos = 60 * 1_000_000_000
- assert np.abs((res - ts).value) < nanos
- assert res.value % nanos == 0
+ assert np.abs((res - ts)._value) < nanos
+ assert res._value % nanos == 0
checker(res, ts, nanos)
res = method(ts, "h")
nanos = 60 * 60 * 1_000_000_000
- assert np.abs((res - ts).value) < nanos
- assert res.value % nanos == 0
+ assert np.abs((res - ts)._value) < nanos
+ assert res._value % nanos == 0
checker(res, ts, nanos)
res = method(ts, "D")
nanos = 24 * 60 * 60 * 1_000_000_000
- assert np.abs((res - ts).value) < nanos
- assert res.value % nanos == 0
+ assert np.abs((res - ts)._value) < nanos
+ assert res._value % nanos == 0
checker(res, ts, nanos)
# --------------------------------------------------------------
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index 1a52e02ee314f..3d09954ed3c0f 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -949,7 +949,7 @@ class TestSetitemNAPeriodDtype(SetitemCastingEquivalents):
@pytest.fixture
def expected(self, key):
exp = Series(period_range("2000-01-01", periods=10, freq="D"))
- exp._values.view("i8")[key] = NaT.value
+ exp._values.view("i8")[key] = NaT._value
assert exp[key] is NaT or all(x is NaT for x in exp[key])
return exp
diff --git a/pandas/tests/series/methods/test_asof.py b/pandas/tests/series/methods/test_asof.py
index 8cb0328eb8634..6ae978b22e24d 100644
--- a/pandas/tests/series/methods/test_asof.py
+++ b/pandas/tests/series/methods/test_asof.py
@@ -17,7 +17,7 @@
class TestSeriesAsof:
def test_asof_nanosecond_index_access(self):
- ts = Timestamp("20130101").as_unit("ns").value
+ ts = Timestamp("20130101").as_unit("ns")._value
dti = DatetimeIndex([ts + 50 + i for i in range(100)])
ser = Series(np.random.randn(100), index=dti)
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index 9fded26c37caf..ae6622da1d407 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -26,7 +26,7 @@
class TestSeriesFillNA:
def test_fillna_nat(self):
- series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
+ series = Series([0, 1, 2, NaT._value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
@@ -44,7 +44,7 @@ def test_fillna_nat(self):
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
- series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
+ series = Series([NaT._value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 49c5b78a48a9f..f01c78460097f 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -907,7 +907,7 @@ def test_constructor_datelike_coercion2(self):
def test_constructor_mixed_int_and_timestamp(self, frame_or_series):
# specifically Timestamp with nanos, not datetimes
- objs = [Timestamp(9), 10, NaT.value]
+ objs = [Timestamp(9), 10, NaT._value]
result = frame_or_series(objs, dtype="M8[ns]")
expected = frame_or_series([Timestamp(9), Timestamp(10), NaT])
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 3c0f962b90086..81e1e4c205cf7 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -44,7 +44,7 @@ def test_timedelta64_nan(self):
td1 = td.copy()
td1[0] = np.nan
assert isna(td1[0])
- assert td1[0].value == iNaT
+ assert td1[0]._value == iNaT
td1[0] = td[0]
assert not isna(td1[0])
@@ -58,7 +58,7 @@ def test_timedelta64_nan(self):
td1[2] = NaT
assert isna(td1[2])
- assert td1[2].value == iNaT
+ assert td1[2]._value == iNaT
td1[2] = td[2]
assert not isna(td1[2])
diff --git a/pandas/tests/series/test_reductions.py b/pandas/tests/series/test_reductions.py
index e9d2877148c2b..eb11b62a651cc 100644
--- a/pandas/tests/series/test_reductions.py
+++ b/pandas/tests/series/test_reductions.py
@@ -59,7 +59,7 @@ def test_td64_summation_overflow():
# the computation is converted to float so
# might be some loss of precision
- assert np.allclose(result.value / 1000, expected.value / 1000)
+ assert np.allclose(result._value / 1000, expected._value / 1000)
# sum
msg = "overflow in timedelta operation"
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 43a6c7028883b..23de7106e08a4 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -241,7 +241,7 @@ def test_format_pre_1900_dates(self):
repr(ts)
def test_series_repr_nat(self):
- series = Series([0, 1000, 2000, pd.NaT.value], dtype="M8[ns]")
+ series = Series([0, 1000, 2000, pd.NaT._value], dtype="M8[ns]")
result = repr(series)
expected = (
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 7a93d2fe8b5ce..93ea31fe68f0f 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -945,8 +945,8 @@ def test_to_datetime_now(self):
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
- assert abs(pdnow.value - now.value) < 1e10
- assert abs(pdnow2.value - now.value) < 1e10
+ assert abs(pdnow._value - now._value) < 1e10
+ assert abs(pdnow2._value - now._value) < 1e10
assert pdnow.tzinfo is None
assert pdnow2.tzinfo is None
@@ -970,10 +970,10 @@ def test_to_datetime_today(self, tz):
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
- assert abs(pdtoday.normalize().value - nptoday) < 1e10
- assert abs(pdtoday2.normalize().value - nptoday) < 1e10
- assert abs(pdtoday.value - tstoday.value) < 1e10
- assert abs(pdtoday.value - tstoday2.value) < 1e10
+ assert abs(pdtoday.normalize()._value - nptoday) < 1e10
+ assert abs(pdtoday2.normalize()._value - nptoday) < 1e10
+ assert abs(pdtoday._value - tstoday._value) < 1e10
+ assert abs(pdtoday._value - tstoday2._value) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index 0ce2a424f6915..6be499b6dc474 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -73,7 +73,7 @@ def test_to_timedelta_units_dtypes(self, dtype, unit):
tm.assert_index_equal(result, expected)
def test_to_timedelta_oob_non_nano(self):
- arr = np.array([pd.NaT.value + 1], dtype="timedelta64[m]")
+ arr = np.array([pd.NaT._value + 1], dtype="timedelta64[m]")
msg = (
"Cannot convert -9223372036854775807 minutes to "
@@ -218,7 +218,7 @@ def test_to_timedelta_on_missing_values(self):
@pytest.mark.parametrize("val", [np.nan, pd.NaT])
def test_to_timedelta_on_missing_values_scalar(self, val):
actual = to_timedelta(val)
- assert actual.value == np.timedelta64("NaT").astype("int64")
+ assert actual._value == np.timedelta64("NaT").astype("int64")
def test_to_timedelta_float(self):
# https://github.com/pandas-dev/pandas/issues/25077
@@ -280,7 +280,7 @@ def test_to_timedelta_zerodim(self, fixed_now_ts):
arg2 = arg.view("m8[ns]")
result = to_timedelta(arg2)
assert isinstance(result, pd.Timedelta)
- assert result.value == dt64.view("i8")
+ assert result._value == dt64.view("i8")
def test_to_timedelta_numeric_ea(self, any_numeric_ea_dtype):
# GH#48796
diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py
index 13619c2c0c828..69953955ebbce 100644
--- a/pandas/tests/tseries/offsets/test_ticks.py
+++ b/pandas/tests/tseries/offsets/test_ticks.py
@@ -257,7 +257,7 @@ def test_tick_division(cls):
assert not isinstance(result, cls)
assert result.delta == off.delta / 1000
- if cls._nanos_inc < Timedelta(seconds=1).value:
+ if cls._nanos_inc < Timedelta(seconds=1)._value:
# Case where we end up with a bigger class
result = off / 0.001
assert isinstance(result, offsets.Tick)
diff --git a/pandas/tests/tslibs/test_timezones.py b/pandas/tests/tslibs/test_timezones.py
index 386b89fa03471..28e4889983fb9 100644
--- a/pandas/tests/tslibs/test_timezones.py
+++ b/pandas/tests/tslibs/test_timezones.py
@@ -57,7 +57,7 @@ def test_tzlocal_offset():
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = offset.total_seconds()
- assert ts.value + offset == Timestamp("2011-01-01").value
+ assert ts._value + offset == Timestamp("2011-01-01")._value
def test_tzlocal_is_not_utc():
| - [ ] closes #49076 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50891 | 2023-01-20T11:37:44Z | 2023-02-09T01:03:58Z | 2023-02-09T01:03:58Z | 2023-02-09T01:04:06Z |
DOC: removed hypen to fix docs build numpydoc warnings | diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 17d4e0dd3234b..79332f8ede936 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -2792,7 +2792,7 @@ cdef class Week(SingleConstructorOffset):
2nd week of each month.
Examples
- ---------
+ --------
>>> date_object = pd.Timestamp("2023-01-13")
>>> date_object
| I noticed a few warning in a recent PR. This should fix them.
Ref:


| https://api.github.com/repos/pandas-dev/pandas/pulls/50890 | 2023-01-20T11:21:36Z | 2023-01-20T17:35:06Z | 2023-01-20T17:35:06Z | 2023-01-20T17:35:13Z |
DEPR: remove Int/Uint/Float64Index from tests/arithmetic + various | diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py
index 83b95a1b46075..e41a3c212fbf8 100644
--- a/pandas/tests/arithmetic/conftest.py
+++ b/pandas/tests/arithmetic/conftest.py
@@ -75,7 +75,7 @@ def zero(request):
--------
arr = RangeIndex(5)
arr / zeros
- Float64Index([nan, inf, inf, inf, inf], dtype='float64')
+ NumericIndex([nan, inf, inf, inf, inf], dtype='float64')
"""
return request.param
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index a38fdb2c2cde8..e179674865a84 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -22,11 +22,7 @@
)
import pandas._testing as tm
from pandas.core import ops
-from pandas.core.api import (
- Float64Index,
- Int64Index,
- UInt64Index,
-)
+from pandas.core.api import NumericIndex
from pandas.core.computation import expressions as expr
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
@@ -1058,72 +1054,77 @@ def test_series_divmod_zero(self):
class TestUFuncCompat:
- @pytest.mark.parametrize(
- "holder",
- [Int64Index, UInt64Index, Float64Index, RangeIndex, Series],
- )
- def test_ufunc_compat(self, holder):
+ # TODO: add more dtypes
+ @pytest.mark.parametrize("holder", [NumericIndex, RangeIndex, Series])
+ @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
+ def test_ufunc_compat(self, holder, dtype):
box = Series if holder is Series else Index
if holder is RangeIndex:
+ if dtype != np.int64:
+ pytest.skip(f"dtype {dtype} not relevant for RangeIndex")
idx = RangeIndex(0, 5, name="foo")
else:
- idx = holder(np.arange(5, dtype="int64"), name="foo")
+ idx = holder(np.arange(5, dtype=dtype), name="foo")
result = np.sin(idx)
- expected = box(np.sin(np.arange(5, dtype="int64")), name="foo")
+ expected = box(np.sin(np.arange(5, dtype=dtype)), name="foo")
tm.assert_equal(result, expected)
- @pytest.mark.parametrize("holder", [Int64Index, UInt64Index, Float64Index, Series])
- def test_ufunc_coercions(self, holder):
- idx = holder([1, 2, 3, 4, 5], name="x")
+ # TODO: add more dtypes
+ @pytest.mark.parametrize("holder", [NumericIndex, Series])
+ @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
+ def test_ufunc_coercions(self, holder, dtype):
+ idx = holder([1, 2, 3, 4, 5], dtype=dtype, name="x")
box = Series if holder is Series else Index
result = np.sqrt(idx)
assert result.dtype == "f8" and isinstance(result, box)
- exp = Float64Index(np.sqrt(np.array([1, 2, 3, 4, 5])), name="x")
+ exp = Index(np.sqrt(np.array([1, 2, 3, 4, 5], dtype=np.float64)), name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = np.divide(idx, 2.0)
assert result.dtype == "f8" and isinstance(result, box)
- exp = Float64Index([0.5, 1.0, 1.5, 2.0, 2.5], name="x")
+ exp = Index([0.5, 1.0, 1.5, 2.0, 2.5], dtype=np.float64, name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
# _evaluate_numeric_binop
result = idx + 2.0
assert result.dtype == "f8" and isinstance(result, box)
- exp = Float64Index([3.0, 4.0, 5.0, 6.0, 7.0], name="x")
+ exp = Index([3.0, 4.0, 5.0, 6.0, 7.0], dtype=np.float64, name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx - 2.0
assert result.dtype == "f8" and isinstance(result, box)
- exp = Float64Index([-1.0, 0.0, 1.0, 2.0, 3.0], name="x")
+ exp = Index([-1.0, 0.0, 1.0, 2.0, 3.0], dtype=np.float64, name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx * 1.0
assert result.dtype == "f8" and isinstance(result, box)
- exp = Float64Index([1.0, 2.0, 3.0, 4.0, 5.0], name="x")
+ exp = Index([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float64, name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
result = idx / 2.0
assert result.dtype == "f8" and isinstance(result, box)
- exp = Float64Index([0.5, 1.0, 1.5, 2.0, 2.5], name="x")
+ exp = Index([0.5, 1.0, 1.5, 2.0, 2.5], dtype=np.float64, name="x")
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
- @pytest.mark.parametrize("holder", [Int64Index, UInt64Index, Float64Index, Series])
- def test_ufunc_multiple_return_values(self, holder):
- obj = holder([1, 2, 3], name="x")
+ # TODO: add more dtypes
+ @pytest.mark.parametrize("holder", [NumericIndex, Series])
+ @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
+ def test_ufunc_multiple_return_values(self, holder, dtype):
+ obj = holder([1, 2, 3], dtype=dtype, name="x")
box = Series if holder is Series else Index
result = np.modf(obj)
assert isinstance(result, tuple)
- exp1 = Float64Index([0.0, 0.0, 0.0], name="x")
- exp2 = Float64Index([1.0, 2.0, 3.0], name="x")
+ exp1 = Index([0.0, 0.0, 0.0], dtype=np.float64, name="x")
+ exp2 = Index([1.0, 2.0, 3.0], dtype=np.float64, name="x")
tm.assert_equal(result[0], tm.box_expected(exp1, box))
tm.assert_equal(result[1], tm.box_expected(exp2, box))
@@ -1241,7 +1242,7 @@ def test_binops_index(self, op, idx1, idx2):
@pytest.mark.parametrize("scalar", [-1, 1, 2])
def test_binops_index_scalar(self, op, idx, scalar):
result = op(idx, scalar)
- expected = op(Int64Index(idx), scalar)
+ expected = op(Index(idx.to_numpy()), scalar)
tm.assert_index_equal(result, expected, exact="equiv")
@pytest.mark.parametrize("idx1", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)])
@@ -1261,7 +1262,7 @@ def test_binops_index_scalar_pow(self, idx, scalar):
# numpy does not allow powers of negative integers so test separately
# https://github.com/numpy/numpy/pull/8127
result = pow(idx, scalar)
- expected = pow(Int64Index(idx), scalar)
+ expected = pow(Index(idx.to_numpy()), scalar)
tm.assert_index_equal(result, expected, exact="equiv")
# TODO: divmod?
@@ -1336,17 +1337,18 @@ def test_numeric_compat2(self):
@pytest.mark.parametrize(
"idx, div, expected",
[
+ # TODO: add more dtypes
(RangeIndex(0, 1000, 2), 2, RangeIndex(0, 500, 1)),
(RangeIndex(-99, -201, -3), -3, RangeIndex(33, 67, 1)),
(
RangeIndex(0, 1000, 1),
2,
- Int64Index(RangeIndex(0, 1000, 1)._values) // 2,
+ Index(RangeIndex(0, 1000, 1)._values) // 2,
),
(
RangeIndex(0, 100, 1),
2.0,
- Int64Index(RangeIndex(0, 100, 1)._values) // 2.0,
+ Index(RangeIndex(0, 100, 1)._values) // 2.0,
),
(RangeIndex(0), 50, RangeIndex(0)),
(RangeIndex(2, 4, 2), 3, RangeIndex(0, 1, 1)),
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 29bfaf99b744b..e7be78be55620 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -137,7 +137,7 @@ def test_agg_apply_corner(ts, tsframe):
grouped = ts.groupby(ts * np.nan, group_keys=False)
assert ts.dtype == np.float64
- # groupby float64 values results in Float64Index
+ # groupby float64 values results in a float64 Index
exp = Series([], dtype=np.float64, index=Index([], dtype=np.float64))
tm.assert_series_equal(grouped.sum(), exp)
tm.assert_series_equal(grouped.agg(np.sum), exp)
diff --git a/pandas/tests/indexes/numeric/test_astype.py b/pandas/tests/indexes/numeric/test_astype.py
index ae20c34711366..1c2df6008de5d 100644
--- a/pandas/tests/indexes/numeric/test_astype.py
+++ b/pandas/tests/indexes/numeric/test_astype.py
@@ -11,7 +11,7 @@
class TestAstype:
def test_astype_float64_to_uint64(self):
- # GH#45309 used to incorrectly return Int64Index
+ # GH#45309 used to incorrectly return Index with int64 dtype
idx = Index([0.0, 5.0, 10.0, 15.0, 20.0], dtype=np.float64)
result = idx.astype("u8")
expected = Index([0, 5, 10, 15, 20], dtype=np.uint64)
diff --git a/pandas/tests/indexes/period/methods/test_astype.py b/pandas/tests/indexes/period/methods/test_astype.py
index 993e48f2ebaf6..2a605d136175e 100644
--- a/pandas/tests/indexes/period/methods/test_astype.py
+++ b/pandas/tests/indexes/period/methods/test_astype.py
@@ -11,7 +11,6 @@
period_range,
)
import pandas._testing as tm
-from pandas.core.indexes.api import Int64Index
class TestPeriodIndexAsType:
@@ -36,7 +35,7 @@ def test_astype_conversion(self):
tm.assert_index_equal(result, expected)
result = idx.astype(np.int64)
- expected = Int64Index(
+ expected = Index(
[16937] + [-9223372036854775808] * 3, dtype=np.int64, name="idx"
)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py
index 5dff5c2ad9c86..60e50a757a271 100644
--- a/pandas/tests/indexes/period/test_constructors.py
+++ b/pandas/tests/indexes/period/test_constructors.py
@@ -329,7 +329,7 @@ def test_constructor_simple_new(self):
msg = "Should be numpy array of type i8"
with pytest.raises(AssertionError, match=msg):
- # Need ndarray, not Int64Index
+ # Need ndarray, not int64 Index
type(idx._data)._simple_new(Index(idx.asi8), freq=idx.freq)
arr = type(idx._data)._simple_new(idx.asi8, freq=idx.freq)
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index d8b9082ec318a..46cbbbd3e6480 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -874,7 +874,7 @@ def test_idxmax(self):
result = s.idxmax()
assert result == 4
- # Float64Index
+ # Index with float64 dtype
# GH#5914
s = Series([1, 2, 3], [1.1, 2.1, 3.1])
result = s.idxmax()
| Progress towards #42717.
Arithmetic ops with indexes with 8/16/32 bit dtypes is a bit difficult and I'd like like to work on that after removal of Int64Index etc. So there's a follow-up, where more dtypes should be added to the tests in `pandas/tests/arithmetic/`. | https://api.github.com/repos/pandas-dev/pandas/pulls/50889 | 2023-01-20T10:59:46Z | 2023-01-20T17:39:20Z | 2023-01-20T17:39:20Z | 2023-01-20T17:59:59Z |
Gitpod docs: added missing line break | diff --git a/doc/source/development/contributing_gitpod.rst b/doc/source/development/contributing_gitpod.rst
index 664711dc8928b..c591be5425db9 100644
--- a/doc/source/development/contributing_gitpod.rst
+++ b/doc/source/development/contributing_gitpod.rst
@@ -145,7 +145,7 @@ docs you need to run the following command in the docs directory::
Alternatively you can build a single page with::
- python make.py html python make.py --single development/contributing_gitpod.rst
+ python make.py --single development/contributing_gitpod.rst
You have two main options to render the documentation in Gitpod.
| Thanks to the PyLadies Berlin pandas sprint participants who noticed this error 🙇♀️
CC @phofl | https://api.github.com/repos/pandas-dev/pandas/pulls/50888 | 2023-01-20T08:52:01Z | 2023-01-20T18:59:29Z | 2023-01-20T18:59:29Z | 2023-01-20T18:59:36Z |
Updated release instructions to use rst instead of markdown | diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst
index 4113fabcc78d5..3405f283c639a 100644
--- a/doc/source/development/maintaining.rst
+++ b/doc/source/development/maintaining.rst
@@ -349,10 +349,10 @@ The release process makes a snapshot of pandas (a git commit) available to users
a particular version number. After the release the new pandas version will be available
in the next places:
-- Git repo with a [new tag](https://github.com/pandas-dev/pandas/tags)
-- Source distribution in a [GitHub release](https://github.com/pandas-dev/pandas/releases)
-- Pip packages in the [PyPI](https://pypi.org/project/pandas/)
-- Conda/Mamba packages in [conda-forge](https://anaconda.org/conda-forge/pandas)
+- Git repo with a `new tag <https://github.com/pandas-dev/pandas/tags>`_
+- Source distribution in a `GitHub release <https://github.com/pandas-dev/pandas/releases>`_
+- Pip packages in the `PyPI <https://pypi.org/project/pandas/>`_
+- Conda/Mamba packages in `conda-forge <https://anaconda.org/conda-forge/pandas>`_
The process for releasing a new version of pandas is detailed next section.
@@ -368,11 +368,11 @@ Prerequisites
In order to be able to release a new pandas version, the next permissions are needed:
-- Merge rights to the [pandas](https://github.com/pandas-dev/pandas/),
- [pandas-wheels](https://github.com/MacPython/pandas-wheels), and
- [pandas-feedstock](https://github.com/conda-forge/pandas-feedstock/) repositories.
+- Merge rights to the `pandas <https://github.com/pandas-dev/pandas/>`_,
+ `pandas-wheels <https://github.com/MacPython/pandas-wheels>`_, and
+ `pandas-feedstock <https://github.com/conda-forge/pandas-feedstock/>`_ repositories.
- Permissions to push to main in the pandas repository, to push the new tags.
-- Write permissions to [PyPI](https://github.com/conda-forge/pandas-feedstock/pulls)
+- `Write permissions to PyPI <https://github.com/conda-forge/pandas-feedstock/pulls>`_
- Access to the social media accounts, to publish the announcements.
Pre-release
@@ -408,7 +408,7 @@ Pre-release
Release
```````
-1. Create an empty commit and a tag in the last commit of the branch to be released:
+1. Create an empty commit and a tag in the last commit of the branch to be released::
git checkout <branch>
git pull --ff-only upstream <branch>
@@ -423,7 +423,7 @@ which will be triggered when the tag is pushed.
2. Only if the release is a release candidate, we want to create a new branch for it, immediately
after creating the tag. For example, if we are releasing pandas 1.4.0rc0, we would like to
create the branch 1.4.x to backport commits to the 1.4 versions. As well as create a tag to
- mark the start of the development of 1.5.0 (assuming it is the next version):
+ mark the start of the development of 1.5.0 (assuming it is the next version)::
git checkout -b 1.4.x
git push upstream 1.4.x
@@ -436,7 +436,7 @@ which will be triggered when the tag is pushed.
./setup.py sdist --formats=gztar --quiet
-4. Create a [new GitHub release](https://github.com/pandas-dev/pandas/releases/new):
+4. Create a `new GitHub release <https://github.com/pandas-dev/pandas/releases/new>`_:
- Title: ``Pandas <version>``
- Tag: ``<version>``
@@ -447,13 +447,13 @@ which will be triggered when the tag is pushed.
(e.g. releasing 1.4.5 after 1.5 has been released)
5. The GitHub release will after some hours trigger an
- [automated conda-forge PR](https://github.com/conda-forge/pandas-feedstock/pulls).
+ `automated conda-forge PR <https://github.com/conda-forge/pandas-feedstock/pulls>`_.
Merge it once the CI is green, and it will generate the conda-forge packages.
6. Packages for supported versions in PyPI are built in the
- [MacPython repo](https://github.com/MacPython/pandas-wheels).
+ `MacPython repo <https://github.com/MacPython/pandas-wheels>`_.
Open a PR updating the build commit to the released version, and merge it once the
- CI is green.
+ CI is green. To do this type::
git checkout master
git pull --ff-only upstream master
@@ -486,7 +486,7 @@ Post-Release
4. Create a new issue for the next release, with the estimated date of release.
5. Open a PR with the placeholder for the release notes of the next version. See
- for example [the PR for 1.5.3](https://github.com/pandas-dev/pandas/pull/49843/files).
+ for example `the PR for 1.5.3 <https://github.com/pandas-dev/pandas/pull/49843/files>`_.
6. Announce the new release in the official channels (use previous announcements
for reference):
| - [ ] closes #50824
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
@datapythonista I tried to fix the other pull request but made a mess of it so I thought it would be best to start over. If this was the wrong, please let me know what should have been done; still learning the GitHub/ git workflow.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50886 | 2023-01-20T04:46:10Z | 2023-01-23T19:16:28Z | 2023-01-23T19:16:28Z | 2023-01-24T02:14:05Z |
TST/CLN: Remove unneeded warning filtering in plotting helper | diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 20de38ebf6665..3cc0d59457528 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -8,7 +8,6 @@
TYPE_CHECKING,
Sequence,
)
-import warnings
import numpy as np
@@ -516,7 +515,7 @@ def get_y_axis(self, ax):
return ax._shared_axes["y"]
-def _check_plot_works(f, filterwarnings="always", default_axes=False, **kwargs):
+def _check_plot_works(f, default_axes=False, **kwargs):
"""
Create plot and ensure that plot return object is valid.
@@ -524,9 +523,6 @@ def _check_plot_works(f, filterwarnings="always", default_axes=False, **kwargs):
----------
f : func
Plotting function.
- filterwarnings : str
- Warnings filter.
- See https://docs.python.org/3/library/warnings.html#warning-filter
default_axes : bool, optional
If False (default):
- If `ax` not in `kwargs`, then create subplot(211) and plot there
@@ -554,24 +550,22 @@ def _check_plot_works(f, filterwarnings="always", default_axes=False, **kwargs):
gen_plots = _gen_two_subplots
ret = None
- with warnings.catch_warnings():
- warnings.simplefilter(filterwarnings)
- try:
- fig = kwargs.get("figure", plt.gcf())
- plt.clf()
+ try:
+ fig = kwargs.get("figure", plt.gcf())
+ plt.clf()
- for ret in gen_plots(f, fig, **kwargs):
- tm.assert_is_valid_plot_return_object(ret)
+ for ret in gen_plots(f, fig, **kwargs):
+ tm.assert_is_valid_plot_return_object(ret)
- with tm.ensure_clean(return_filelike=True) as path:
- plt.savefig(path)
+ with tm.ensure_clean(return_filelike=True) as path:
+ plt.savefig(path)
- except Exception as err:
- raise err
- finally:
- tm.close(fig)
+ except Exception as err:
+ raise err
+ finally:
+ tm.close(fig)
- return ret
+ return ret
def _gen_default_plot(f, fig, **kwargs):
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index 797aae7eaba3a..a80476038b7f1 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -413,7 +413,6 @@ def test_hist_subplot_xrot(self):
axes = _check_plot_works(
df.hist,
default_axes=True,
- filterwarnings="always",
column="length",
by="animal",
bins=5,
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 67486ec2a17b6..9f0753ce0fecd 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -109,7 +109,6 @@ def test_scatter_matrix_axis(self, pass_axis):
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
axes = _check_plot_works(
scatter_matrix,
- filterwarnings="always",
frame=df,
range_padding=0.1,
ax=ax,
@@ -127,7 +126,6 @@ def test_scatter_matrix_axis(self, pass_axis):
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
axes = _check_plot_works(
scatter_matrix,
- filterwarnings="always",
frame=df,
range_padding=0.1,
ax=ax,
| It doesn't appear these tests raise warnings so I think these can be removed
| https://api.github.com/repos/pandas-dev/pandas/pulls/50880 | 2023-01-19T23:53:23Z | 2023-01-20T11:32:31Z | 2023-01-20T11:32:30Z | 2023-01-20T15:28:37Z |
ENH: support argmin/argmax with pyarrow durations | diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index fcec562279248..42d85935efe95 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -528,8 +528,12 @@ def _argmin_max(self, skipna: bool, method: str) -> int:
f"arg{method} only implemented for pyarrow version >= 6.0"
)
- value = getattr(pc, method)(self._data, skip_nulls=skipna)
- return pc.index(self._data, value).as_py()
+ data = self._data
+ if pa.types.is_duration(data.type):
+ data = data.cast(pa.int64())
+
+ value = getattr(pc, method)(data, skip_nulls=skipna)
+ return pc.index(data, value).as_py()
def argmin(self, skipna: bool = True) -> int:
return self._argmin_max(skipna, "min")
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index f7517e1a3ab7f..c798a3d91f816 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -869,13 +869,6 @@ def test_argmin_argmax(
reason=f"{pa_dtype} only has 2 unique possible values",
)
)
- elif pa.types.is_duration(pa_dtype):
- request.node.add_marker(
- pytest.mark.xfail(
- raises=pa.ArrowNotImplementedError,
- reason=f"min_max not supported in pyarrow for {pa_dtype}",
- )
- )
super().test_argmin_argmax(data_for_sorting, data_missing_for_sorting, na_value)
@pytest.mark.parametrize(
@@ -894,7 +887,6 @@ def test_argmin_argmax(
def test_argreduce_series(
self, data_missing_for_sorting, op_name, skipna, expected, request
):
- pa_dtype = data_missing_for_sorting.dtype.pyarrow_dtype
if pa_version_under6p0 and skipna:
request.node.add_marker(
pytest.mark.xfail(
@@ -902,13 +894,6 @@ def test_argreduce_series(
reason="min_max not supported in pyarrow",
)
)
- elif not pa_version_under6p0 and pa.types.is_duration(pa_dtype) and skipna:
- request.node.add_marker(
- pytest.mark.xfail(
- raises=pa.ArrowNotImplementedError,
- reason=f"min_max not supported in pyarrow for {pa_dtype}",
- )
- )
super().test_argreduce_series(
data_missing_for_sorting, op_name, skipna, expected
)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50879 | 2023-01-19T22:46:24Z | 2023-01-20T18:08:38Z | 2023-01-20T18:08:38Z | 2023-01-20T18:15:29Z |
Remove obj_with_exclusions | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7555c8b68a4f7..10201dc81136b 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1096,6 +1096,7 @@ Groupby/resample/rolling
- Bug in :meth:`.DataFrameGroupBy.transform` and :meth:`.SeriesGroupBy.transform` would raise incorrectly when grouper had ``axis=1`` for ``"idxmin"`` and ``"idxmax"`` arguments (:issue:`45986`)
- Bug in :class:`.DataFrameGroupBy` would raise when used with an empty DataFrame, categorical grouper, and ``dropna=False`` (:issue:`50634`)
- Bug in :meth:`.SeriesGroupBy.value_counts` did not respect ``sort=False`` (:issue:`50482`)
+- Bug in :meth:`.DataFrameGroupBy.describe` produced incorrect results when data had duplicate columns (:issue:`50806`)
-
Reshaping
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index c15948ce877a8..8b3618bced2ba 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -610,7 +610,7 @@ def f(self):
class BaseGroupBy(PandasObject, SelectionMixin[NDFrameT], GroupByIndexingMixin):
- _group_selection: IndexLabel | None = None
+ _group_selection: list[int] | None = None
_hidden_attrs = PandasObject._hidden_attrs | {
"as_index",
"axis",
@@ -726,7 +726,9 @@ def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
- return self.obj[self._group_selection]
+ return self.obj._take(
+ self._group_selection, axis=1, convert_indices=False
+ )
return self.obj
else:
return self.obj[self._selection]
@@ -939,6 +941,12 @@ def __init__(
self.grouper = grouper
self.exclusions = frozenset(exclusions) if exclusions else frozenset()
+ with self._group_selection_context():
+ so = self._selected_obj
+ owe = self._obj_with_exclusions
+ import pandas._testing as tm
+ tm.assert_equal(so, owe)
+
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
@@ -1023,7 +1031,9 @@ def _set_group_selection(self) -> None:
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
ax = self.obj._info_axis
- self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
+ self._group_selection = [
+ idx for idx, label in enumerate(ax) if label not in groupers
+ ]
self._reset_cache("_selected_obj")
@final
diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py
index 7e7f1a628da6e..175414052d836 100644
--- a/pandas/tests/groupby/test_counting.py
+++ b/pandas/tests/groupby/test_counting.py
@@ -358,6 +358,7 @@ def test_lower_int_prec_count():
tm.assert_frame_equal(result, expected)
+@pytest.mark.xfail(reason="Raises when we use tm.assert_equal")
def test_count_uses_size_on_exception():
class RaisingObjectException(Exception):
pass
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 1e16e353cc1a4..c077fb1d257a5 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1254,6 +1254,27 @@ def test_describe_with_duplicate_output_column_names(as_index, keys):
tm.assert_frame_equal(result, expected)
+def test_describe_duplicate_columns():
+ # GH#50806
+ df = DataFrame([[0, 1, 2, 3]])
+ df.columns = [0, 1, 2, 0]
+ gb = df.groupby(df[1])
+ result = gb.describe(percentiles=[])
+
+ columns = ["count", "mean", "std", "min", "50%", "max"]
+ frames = [
+ DataFrame([[1.0, val, np.nan, val, val, val]], index=[1], columns=columns)
+ for val in (0.0, 2.0, 3.0)
+ ]
+ expected = pd.concat(frames, axis=1)
+ expected.columns = MultiIndex(
+ levels=[[0, 2], columns],
+ codes=[6 * [0] + 6 * [1] + 6 * [0], 3 * list(range(6))],
+ )
+ expected.index.names = [1]
+ tm.assert_frame_equal(result, expected)
+
+
def test_groupby_mean_no_overflow():
# Regression test for (#22487)
df = DataFrame(
@@ -1594,3 +1615,29 @@ def test_multiindex_group_all_columns_when_empty(groupby_func):
result = method(*args).index
expected = df.index
tm.assert_index_equal(result, expected)
+
+
+def test_duplicate_columns(request, groupby_func, as_index):
+ # GH#50806
+ if groupby_func == "corrwith":
+ msg = "GH#50845 - corrwith fails when there are duplicate columns"
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
+ df = DataFrame([[1, 3, 6], [1, 4, 7], [2, 5, 8]], columns=list("abb"))
+ args = get_groupby_method_args(groupby_func, df)
+ gb = df.groupby("a", as_index=as_index)
+ result = getattr(gb, groupby_func)(*args)
+
+ if groupby_func in ("size", "ngroup", "cumcount"):
+ expected = getattr(
+ df.take([0, 1], axis=1).groupby("a", as_index=as_index), groupby_func
+ )(*args)
+ tm.assert_equal(result, expected)
+ else:
+ expected_df = df.copy()
+ expected_df.columns = ["a", "b", "c"]
+ expected_args = get_groupby_method_args(groupby_func, expected_df)
+ expected = getattr(expected_df.groupby("a", as_index=as_index), groupby_func)(
+ *expected_args
+ )
+ expected = expected.rename(columns={"c": "b"})
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index a1c1930c2e11b..3a0bfa67470bd 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1700,6 +1700,7 @@ def test_groupby_multiindex_missing_pair():
tm.assert_frame_equal(res, exp)
+@pytest.mark.xfail(reason="GH#50805")
def test_groupby_multiindex_not_lexsorted():
# GH 11640
@@ -2867,3 +2868,14 @@ def test_groupby_method_drop_na(method):
else:
expected = DataFrame({"A": ["a", "b", "c"], "B": [0, 2, 4]}, index=[0, 2, 4])
tm.assert_frame_equal(result, expected)
+
+
+def test_selected_obj_duplicate_columns():
+ # GH#50806
+ df = DataFrame([[0, 1, 2, 3]])
+ df.columns = [0, 1, 2, 0]
+ gb = df.groupby(df[1])
+ with gb._group_selection_context():
+ result = gb._selected_obj
+ expected = df.take([0, 2, 3], axis=1)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py
index 5418a2a60dc80..3cee8baeb6e5b 100644
--- a/pandas/tests/groupby/test_groupby_dropna.py
+++ b/pandas/tests/groupby/test_groupby_dropna.py
@@ -544,9 +544,12 @@ def test_categorical_reducers(
gb_filled = df_filled.groupby(keys, observed=observed, sort=sort, as_index=True)
expected = getattr(gb_filled, reduction_func)(*args_filled).reset_index()
- expected["x"] = expected["x"].replace(4, None)
+ # Workaround since we can't use replace (GH#50872)
+ mask = expected["x"] == 4
+ expected["x"] = expected["x"].mask(mask, None).cat.remove_categories([4])
if index_kind == "multi":
- expected["x2"] = expected["x2"].replace(4, None)
+ mask = expected["x2"] == 4
+ expected["x2"] = expected["x2"].mask(mask, None).cat.remove_categories([4])
if as_index:
if index_kind == "multi":
expected = expected.set_index(["x", "x2"])
@@ -578,6 +581,8 @@ def test_categorical_reducers(
result = getattr(gb_keepna, reduction_func)(*args)
# size will return a Series, others are DataFrame
+ print(result.index.dtype)
+ print(expected.index.dtype)
tm.assert_equal(result, expected)
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 30cfe638c8540..6a56a3fac33f7 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -483,6 +483,7 @@ def test_multifunc_select_col_integer_cols(self, df):
# it works!
df.groupby(1, as_index=False)[2].agg({"Q": np.mean})
+ @pytest.mark.xfail(reason="GH#50805")
def test_multiindex_columns_empty_level(self):
lst = [["count", "values"], ["to filter", ""]]
midx = MultiIndex.from_tuples(lst)
diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py
index 9216fff89e074..caab3af3f869e 100644
--- a/pandas/tests/window/test_groupby.py
+++ b/pandas/tests/window/test_groupby.py
@@ -818,7 +818,14 @@ def test_groupby_rolling_var(self, window, min_periods, closed, expected):
tm.assert_frame_equal(result, expected_result)
@pytest.mark.parametrize(
- "columns", [MultiIndex.from_tuples([("A", ""), ("B", "C")]), ["A", "B"]]
+ "columns",
+ [
+ pytest.param(
+ MultiIndex.from_tuples([("A", ""), ("B", "C")]),
+ marks=pytest.mark.xfail(reason="GH#50805"),
+ ),
+ ["A", "B"],
+ ],
)
def test_by_column_not_in_values(self, columns):
# GH 32262
| - [x] closes #46944 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Built on #50846; just testing whether obj_with_exclusions is now the same as selected_obj with the exception of #50805 | https://api.github.com/repos/pandas-dev/pandas/pulls/50878 | 2023-01-19T22:16:26Z | 2023-02-08T03:56:23Z | null | 2023-02-08T03:56:28Z |
TST: fix pyarrow arithmetic xfails | diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index a7c243cdfe74f..1bfe0106e1938 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -1010,14 +1010,29 @@ def _patch_combine(self, obj, other, op):
else:
expected_data = expected
original_dtype = obj.dtype
- pa_array = pa.array(expected_data._values).cast(original_dtype.pyarrow_dtype)
- pd_array = type(expected_data._values)(pa_array)
+
+ pa_expected = pa.array(expected_data._values)
+
+ if pa.types.is_duration(pa_expected.type):
+ # pyarrow sees sequence of datetime/timedelta objects and defaults
+ # to "us" but the non-pointwise op retains unit
+ unit = original_dtype.pyarrow_dtype.unit
+ if type(other) in [datetime, timedelta] and unit in ["s", "ms"]:
+ # pydatetime/pytimedelta objects have microsecond reso, so we
+ # take the higher reso of the original and microsecond. Note
+ # this matches what we would do with DatetimeArray/TimedeltaArray
+ unit = "us"
+ pa_expected = pa_expected.cast(f"duration[{unit}]")
+ else:
+ pa_expected = pa_expected.cast(original_dtype.pyarrow_dtype)
+
+ pd_expected = type(expected_data._values)(pa_expected)
if was_frame:
expected = pd.DataFrame(
- pd_array, index=expected.index, columns=expected.columns
+ pd_expected, index=expected.index, columns=expected.columns
)
else:
- expected = pd.Series(pd_array)
+ expected = pd.Series(pd_expected)
return expected
def _is_temporal_supported(self, opname, pa_dtype):
@@ -1097,7 +1112,14 @@ def test_arith_series_with_scalar(
if mark is not None:
request.node.add_marker(mark)
- if all_arithmetic_operators == "__floordiv__" and pa.types.is_integer(pa_dtype):
+ if (
+ (
+ all_arithmetic_operators == "__floordiv__"
+ and pa.types.is_integer(pa_dtype)
+ )
+ or pa.types.is_duration(pa_dtype)
+ or pa.types.is_timestamp(pa_dtype)
+ ):
# BaseOpsUtil._combine always returns int64, while ArrowExtensionArray does
# not upcast
monkeypatch.setattr(TestBaseArithmeticOps, "_combine", self._patch_combine)
@@ -1121,7 +1143,14 @@ def test_arith_frame_with_scalar(
if mark is not None:
request.node.add_marker(mark)
- if all_arithmetic_operators == "__floordiv__" and pa.types.is_integer(pa_dtype):
+ if (
+ (
+ all_arithmetic_operators == "__floordiv__"
+ and pa.types.is_integer(pa_dtype)
+ )
+ or pa.types.is_duration(pa_dtype)
+ or pa.types.is_timestamp(pa_dtype)
+ ):
# BaseOpsUtil._combine always returns int64, while ArrowExtensionArray does
# not upcast
monkeypatch.setattr(TestBaseArithmeticOps, "_combine", self._patch_combine)
@@ -1165,18 +1194,41 @@ def test_arith_series_with_array(
# since ser.iloc[0] is a python scalar
other = pd.Series(pd.array([ser.iloc[0]] * len(ser), dtype=data.dtype))
- if pa.types.is_floating(pa_dtype) or (
- pa.types.is_integer(pa_dtype) and all_arithmetic_operators != "__truediv__"
+ if (
+ pa.types.is_floating(pa_dtype)
+ or (
+ pa.types.is_integer(pa_dtype)
+ and all_arithmetic_operators != "__truediv__"
+ )
+ or pa.types.is_duration(pa_dtype)
+ or pa.types.is_timestamp(pa_dtype)
):
monkeypatch.setattr(TestBaseArithmeticOps, "_combine", self._patch_combine)
self.check_opname(ser, op_name, other, exc=self.series_array_exc)
def test_add_series_with_extension_array(self, data, request):
pa_dtype = data.dtype.pyarrow_dtype
- if not (
- pa.types.is_integer(pa_dtype)
- or pa.types.is_floating(pa_dtype)
- or (not pa_version_under8p0 and pa.types.is_duration(pa_dtype))
+
+ if pa.types.is_temporal(pa_dtype) and not pa.types.is_duration(pa_dtype):
+ # i.e. timestamp, date, time, but not timedelta; these *should*
+ # raise when trying to add
+ ser = pd.Series(data)
+ if pa_version_under7p0:
+ msg = "Function add_checked has no kernel matching input types"
+ else:
+ msg = "Function 'add_checked' has no kernel matching input types"
+ with pytest.raises(NotImplementedError, match=msg):
+ # TODO: this is a pa.lib.ArrowNotImplementedError, might
+ # be better to reraise a TypeError; more consistent with
+ # non-pyarrow cases
+ ser + data
+
+ return
+
+ if (pa_version_under8p0 and pa.types.is_duration(pa_dtype)) or (
+ pa.types.is_binary(pa_dtype)
+ or pa.types.is_string(pa_dtype)
+ or pa.types.is_boolean(pa_dtype)
):
request.node.add_marker(
pytest.mark.xfail(
| fixes 22 xfails | https://api.github.com/repos/pandas-dev/pandas/pulls/50877 | 2023-01-19T21:57:51Z | 2023-02-02T18:41:56Z | 2023-02-02T18:41:56Z | 2023-02-02T18:57:07Z |
Fix groupby-resample KeyError when resampling on Index and giving explicit list of columns. | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7555c8b68a4f7..986e5891033a1 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1096,6 +1096,7 @@ Groupby/resample/rolling
- Bug in :meth:`.DataFrameGroupBy.transform` and :meth:`.SeriesGroupBy.transform` would raise incorrectly when grouper had ``axis=1`` for ``"idxmin"`` and ``"idxmax"`` arguments (:issue:`45986`)
- Bug in :class:`.DataFrameGroupBy` would raise when used with an empty DataFrame, categorical grouper, and ``dropna=False`` (:issue:`50634`)
- Bug in :meth:`.SeriesGroupBy.value_counts` did not respect ``sort=False`` (:issue:`50482`)
+- Bug in :meth:`.DataFrameGroupBy.resample` raises ``KeyError`` when getting the result from a key list when resampling on time index (:issue:`50840`)
-
Reshaping
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 3c26299c93b70..907d6522d3236 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1202,7 +1202,7 @@ def _gotitem(self, key, ndim, subset=None):
# Try to select from a DataFrame, falling back to a Series
try:
- if isinstance(key, list) and self.key not in key:
+ if isinstance(key, list) and self.key not in key and self.key is not None:
key.append(self.key)
groupby = self._groupby[key]
except IndexError:
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 3ab57e137f1c1..c3717cee05f2b 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -536,3 +536,82 @@ def test_groupby_resample_size_all_index_same():
),
)
tm.assert_series_equal(result, expected)
+
+
+def test_groupby_resample_on_index_with_list_of_keys():
+ # GH 50840
+ df = DataFrame(
+ data={
+ "group": [0, 0, 0, 0, 1, 1, 1, 1],
+ "val": [3, 1, 4, 1, 5, 9, 2, 6],
+ },
+ index=Series(
+ date_range(start="2016-01-01", periods=8),
+ name="date",
+ ),
+ )
+ result = df.groupby("group").resample("2D")[["val"]].mean()
+ expected = DataFrame(
+ data={
+ "val": [2.0, 2.5, 7.0, 4.0],
+ },
+ index=Index(
+ data=[
+ (0, Timestamp("2016-01-01")),
+ (0, Timestamp("2016-01-03")),
+ (1, Timestamp("2016-01-05")),
+ (1, Timestamp("2016-01-07")),
+ ],
+ name=("group", "date"),
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_resample_on_index_with_list_of_keys_multi_columns():
+ # GH 50876
+ df = DataFrame(
+ data={
+ "group": [0, 0, 0, 0, 1, 1, 1, 1],
+ "first_val": [3, 1, 4, 1, 5, 9, 2, 6],
+ "second_val": [2, 7, 1, 8, 2, 8, 1, 8],
+ "third_val": [1, 4, 1, 4, 2, 1, 3, 5],
+ },
+ index=Series(
+ date_range(start="2016-01-01", periods=8),
+ name="date",
+ ),
+ )
+ result = df.groupby("group").resample("2D")[["first_val", "second_val"]].mean()
+ expected = DataFrame(
+ data={
+ "first_val": [2.0, 2.5, 7.0, 4.0],
+ "second_val": [4.5, 4.5, 5.0, 4.5],
+ },
+ index=Index(
+ data=[
+ (0, Timestamp("2016-01-01")),
+ (0, Timestamp("2016-01-03")),
+ (1, Timestamp("2016-01-05")),
+ (1, Timestamp("2016-01-07")),
+ ],
+ name=("group", "date"),
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_resample_on_index_with_list_of_keys_missing_column():
+ # GH 50876
+ df = DataFrame(
+ data={
+ "group": [0, 0, 0, 0, 1, 1, 1, 1],
+ "val": [3, 1, 4, 1, 5, 9, 2, 6],
+ },
+ index=Series(
+ date_range(start="2016-01-01", periods=8),
+ name="date",
+ ),
+ )
+ with pytest.raises(KeyError, match="Columns not found"):
+ df.groupby("group").resample("2D")[["val_not_in_dataframe"]].mean()
| - [x] closes #50840 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50876 | 2023-01-19T21:54:56Z | 2023-01-23T18:04:54Z | 2023-01-23T18:04:54Z | 2023-01-23T18:05:02Z |
DEPR: remove Int/Uint/Float64Index from pandas/tests/arrays | diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index ba33cfe055bb3..36c06a77b7822 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -29,7 +29,6 @@
timedelta_range,
)
import pandas._testing as tm
-from pandas.core.api import Int64Index
class TestCategoricalConstructors:
@@ -74,7 +73,7 @@ def test_constructor_empty(self):
tm.assert_index_equal(c.categories, expected)
c = Categorical([], categories=[1, 2, 3])
- expected = Int64Index([1, 2, 3])
+ expected = Index([1, 2, 3], dtype=np.int64)
tm.assert_index_equal(c.categories, expected)
def test_constructor_empty_boolean(self):
diff --git a/pandas/tests/arrays/integer/test_dtypes.py b/pandas/tests/arrays/integer/test_dtypes.py
index f34953876f5f4..629b104dc1424 100644
--- a/pandas/tests/arrays/integer/test_dtypes.py
+++ b/pandas/tests/arrays/integer/test_dtypes.py
@@ -62,8 +62,7 @@ def test_astype_nansafe():
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(all_data, dropna):
- # ensure that we do not coerce to Float64Index, rather
- # keep as Index
+ # ensure that we do not coerce to different Index dtype or non-index
all_data = all_data[:10]
if dropna:
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index b2fa4d72a355c..54c8e359b2859 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -8,7 +8,6 @@
import pandas as pd
from pandas import isna
import pandas._testing as tm
-from pandas.core.api import Int64Index
from pandas.core.arrays.sparse import (
SparseArray,
SparseDtype,
@@ -469,7 +468,7 @@ def test_dropna(fill_value):
tm.assert_sp_array_equal(arr.dropna(), exp)
df = pd.DataFrame({"a": [0, 1], "b": arr})
- expected_df = pd.DataFrame({"a": [1], "b": exp}, index=Int64Index([1]))
+ expected_df = pd.DataFrame({"a": [1], "b": exp}, index=pd.Index([1]))
tm.assert_equal(df.dropna(), expected_df)
| Extracted from #50479 to make it more manageable.
Progress towards #42717. | https://api.github.com/repos/pandas-dev/pandas/pulls/50875 | 2023-01-19T21:48:27Z | 2023-01-20T11:34:11Z | 2023-01-20T11:34:11Z | 2023-01-20T11:34:19Z |
TST: Add test for to_dict converting masked to python types | diff --git a/pandas/tests/frame/methods/test_to_dict.py b/pandas/tests/frame/methods/test_to_dict.py
index d08323ec01ae8..e14dc8da68136 100644
--- a/pandas/tests/frame/methods/test_to_dict.py
+++ b/pandas/tests/frame/methods/test_to_dict.py
@@ -485,3 +485,13 @@ def test_to_dict_na_to_none(self, orient, expected):
df = DataFrame({"a": [1, NA]}, dtype="Int64")
result = df.to_dict(orient=orient)
assert result == expected
+
+ def test_to_dict_masked_native_python(self):
+ # GH#34665
+ df = DataFrame({"a": Series([1, 2], dtype="Int64"), "B": 1})
+ result = df.to_dict(orient="records")
+ assert type(result[0]["a"]) is int
+
+ df = DataFrame({"a": Series([1, NA], dtype="Int64"), "B": 1})
+ result = df.to_dict(orient="records")
+ assert type(result[0]["a"]) is int
| - [x] closes #34665 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50874 | 2023-01-19T21:48:08Z | 2023-01-20T18:31:09Z | 2023-01-20T18:31:08Z | 2023-01-21T01:34:18Z |
DOC: improve Timestamps docstrings | diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index c1aef2e5115ac..1a4e90e57d848 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -687,6 +687,14 @@ cdef class _Timestamp(ABCTimestamp):
"""
Return True if date is first day of the year.
+ Returns
+ -------
+ bool
+
+ See Also
+ --------
+ Timestamp.is_year_end : Similar property indicating the end of the year.
+
Examples
--------
>>> ts = pd.Timestamp(2020, 3, 14)
@@ -704,6 +712,14 @@ cdef class _Timestamp(ABCTimestamp):
"""
Return True if date is last day of the year.
+ Returns
+ -------
+ bool
+
+ See Also
+ --------
+ Timestamp.is_year_start : Similar property indicating the start of the year.
+
Examples
--------
>>> ts = pd.Timestamp(2020, 3, 14)
@@ -785,6 +801,10 @@ cdef class _Timestamp(ABCTimestamp):
"""
Return True if year is a leap year.
+ Returns
+ -------
+ bool
+
Examples
--------
>>> ts = pd.Timestamp(2020, 3, 14)
@@ -798,6 +818,10 @@ cdef class _Timestamp(ABCTimestamp):
"""
Return day of the week.
+ Returns
+ -------
+ int
+
Examples
--------
>>> ts = pd.Timestamp(2020, 3, 14)
@@ -811,6 +835,10 @@ cdef class _Timestamp(ABCTimestamp):
"""
Return the day of the year.
+ Returns
+ -------
+ int
+
Examples
--------
>>> ts = pd.Timestamp(2020, 3, 14)
@@ -824,6 +852,10 @@ cdef class _Timestamp(ABCTimestamp):
"""
Return the quarter of the year.
+ Returns
+ -------
+ int
+
Examples
--------
>>> ts = pd.Timestamp(2020, 3, 14)
@@ -837,6 +869,10 @@ cdef class _Timestamp(ABCTimestamp):
"""
Return the week number of the year.
+ Returns
+ -------
+ int
+
Examples
--------
>>> ts = pd.Timestamp(2020, 3, 14)
@@ -850,6 +886,10 @@ cdef class _Timestamp(ABCTimestamp):
"""
Return the number of days in the month.
+ Returns
+ -------
+ int
+
Examples
--------
>>> ts = pd.Timestamp(2020, 3, 14)
| - [x] xref #27977
- ~~[Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development~~/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- ~~Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.~~
- ~~Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.~~
Add _Returns_ and _See Also_ sections:
- is_year_start
- is_year_end
- is_leap_year
- day_of_week
- day_of_year
- quarter
- week
- days_in_month | https://api.github.com/repos/pandas-dev/pandas/pulls/50873 | 2023-01-19T21:23:43Z | 2023-01-23T19:19:17Z | 2023-01-23T19:19:17Z | 2023-01-23T20:21:02Z |
CLN: remove redundant latex options | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 0cea08b3d2c87..8766686b25034 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -624,7 +624,9 @@ The arguments signature is similar, albeit ``col_space`` has been removed since
it is ignored by LaTeX engines. This render engine also requires ``jinja2`` as a
dependency which needs to be installed, since rendering is based upon jinja2 templates.
-The pandas options below are no longer used and will be removed in future releases.
+The pandas latex options below are no longer used and have been removed. The generic
+max rows and columns arguments remain but for this functionality should be replaced
+by the Styler equivalents.
The alternative options giving similar functionality are indicated below:
- ``display.latex.escape``: replaced with ``styler.format.escape``,
@@ -638,6 +640,13 @@ The alternative options giving similar functionality are indicated below:
``styler.render.max_rows``, ``styler.render.max_columns`` and
``styler.render.max_elements``.
+Note that due to this change some defaults have also changed:
+
+- ``multirow`` now defaults to *True*.
+- ``multirow_align`` defaults to *"r"* instead of *"l"*.
+- ``multicol_align`` defaults to *"r"* instead of *"l"*.
+- ``escape`` now defaults to *False*.
+
Note that the behaviour of ``_repr_latex_`` is also changed. Previously
setting ``display.latex.repr`` would generate LaTeX only when using nbconvert for a
JupyterNotebook, and not when the user is running the notebook. Now the
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 2e1ddb3c0a628..dd751075647d8 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -210,13 +210,6 @@ def use_numba_cb(key) -> None:
(default: False)
"""
-pc_latex_repr_doc = """
-: boolean
- Whether to produce a latex DataFrame representation for jupyter
- environments that support it.
- (default: False)
-"""
-
pc_table_schema_doc = """
: boolean
Whether to publish a Table Schema representation for frontends
@@ -292,41 +285,6 @@ def use_numba_cb(key) -> None:
df.info() is called. Valid values True,False,'deep'
"""
-pc_latex_escape = """
-: bool
- This specifies if the to_latex method of a Dataframe uses escapes special
- characters.
- Valid values: False,True
-"""
-
-pc_latex_longtable = """
-:bool
- This specifies if the to_latex method of a Dataframe uses the longtable
- format.
- Valid values: False,True
-"""
-
-pc_latex_multicolumn = """
-: bool
- This specifies if the to_latex method of a Dataframe uses multicolumns
- to pretty-print MultiIndex columns.
- Valid values: False,True
-"""
-
-pc_latex_multicolumn_format = """
-: string
- This specifies the format for multicolumn headers.
- Can be surrounded with '|'.
- Valid values: 'l', 'c', 'r', 'p{<width>}'
-"""
-
-pc_latex_multirow = """
-: bool
- This specifies if the to_latex method of a Dataframe uses multirows
- to pretty-print MultiIndex rows.
- Valid values: False,True
-"""
-
def table_schema_cb(key) -> None:
from pandas.io.formats.printing import enable_data_resource_formatter
@@ -425,16 +383,6 @@ def is_terminal() -> bool:
cf.register_option(
"unicode.ambiguous_as_wide", False, pc_east_asian_width_doc, validator=is_bool
)
- cf.register_option("latex.repr", False, pc_latex_repr_doc, validator=is_bool)
- cf.register_option("latex.escape", True, pc_latex_escape, validator=is_bool)
- cf.register_option("latex.longtable", False, pc_latex_longtable, validator=is_bool)
- cf.register_option(
- "latex.multicolumn", True, pc_latex_multicolumn, validator=is_bool
- )
- cf.register_option(
- "latex.multicolumn_format", "l", pc_latex_multicolumn, validator=is_text
- )
- cf.register_option("latex.multirow", False, pc_latex_multirow, validator=is_bool)
cf.register_option(
"html.table_schema",
False,
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index aaf1d0e022fdf..69dc5b47f1af2 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3228,13 +3228,23 @@ def to_latex(
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
- By default, the value will be read from the pandas config
- module. Use a longtable environment instead of tabular. Requires
+ Use a longtable environment instead of tabular. Requires
adding a \usepackage{{longtable}} to your LaTeX preamble.
+ By default, the value will be read from the pandas config
+ module, and set to `True` if the option ``styler.latex.environment`` is
+ `"longtable"`.
+
+ .. versionchanged:: 2.0.0
+ The pandas option affecting this argument has changed.
escape : bool, optional
By default, the value will be read from the pandas config
- module. When set to False prevents from escaping latex special
+ module and set to `True` if the option ``styler.format.escape`` is
+ `"latex"`. When set to False prevents from escaping latex special
characters in column names.
+
+ .. versionchanged:: 2.0.0
+ The pandas option affecting this argument has changed, as has the
+ default value to `False`.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
@@ -3242,16 +3252,30 @@ def to_latex(
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
- The default will be read from the config module.
- multicolumn_format : str, default 'l'
+ The default will be read from the config module, and is set
+ as the option ``styler.sparse.columns``.
+
+ .. versionchanged:: 2.0.0
+ The pandas option affecting this argument has changed.
+ multicolumn_format : str, default 'r'
The alignment for multicolumns, similar to `column_format`
- The default will be read from the config module.
- multirow : bool, default False
+ The default will be read from the config module, and is set as the option
+ ``styler.latex.multicol_align``.
+
+ .. versionchanged:: 2.0.0
+ The pandas option affecting this argument has changed, as has the
+ default value to "r".
+ multirow : bool, default True
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{{multirow}} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
- from the pandas config module.
+ from the pandas config module, and is set as the option
+ ``styler.sparse.index``.
+
+ .. versionchanged:: 2.0.0
+ The pandas option affecting this argument has changed, as has the
+ default value to `True`.
caption : str or tuple, optional
Tuple (full_caption, short_caption),
which results in ``\caption[short_caption]{{full_caption}}``;
@@ -3319,15 +3343,15 @@ def to_latex(
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
- longtable = config.get_option("display.latex.longtable")
+ longtable = config.get_option("styler.latex.environment") == "longtable"
if escape is None:
- escape = config.get_option("display.latex.escape")
+ escape = config.get_option("styler.format.escape") == "latex"
if multicolumn is None:
- multicolumn = config.get_option("display.latex.multicolumn")
+ multicolumn = config.get_option("styler.sparse.columns")
if multicolumn_format is None:
- multicolumn_format = config.get_option("display.latex.multicolumn_format")
+ multicolumn_format = config.get_option("styler.latex.multicol_align")
if multirow is None:
- multirow = config.get_option("display.latex.multirow")
+ multirow = config.get_option("styler.sparse.index")
if column_format is not None and not isinstance(column_format, str):
raise ValueError("`column_format` must be str or unicode")
@@ -3413,7 +3437,9 @@ def _wrap(x, alt_format_):
"label": label,
"position": position,
"column_format": column_format,
- "clines": "skip-last;data" if multirow else None,
+ "clines": "skip-last;data"
+ if (multirow and isinstance(self.index, MultiIndex))
+ else None,
"bold_rows": bold_rows,
}
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 687bad07926d0..66d8084abea68 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -292,7 +292,7 @@ def test_latex_repr(self):
\end{tabular}
"""
with option_context(
- "display.latex.escape", False, "styler.render.repr", "latex"
+ "styler.format.escape", None, "styler.render.repr", "latex"
):
df = DataFrame([[r"$\alpha$", "b", "c"], [1, 2, 3]])
result = df._repr_latex_()
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 42adf3f7b2826..8c66963bbb438 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -782,24 +782,14 @@ def test_to_latex_escape_false(self, df_with_symbols):
assert result == expected
def test_to_latex_escape_default(self, df_with_symbols):
- result = df_with_symbols.to_latex() # default: escape=True
- expected = _dedent(
- r"""
- \begin{tabular}{lll}
- \toprule
- & co\$e\textasciicircum x\$ & co\textasciicircum l1 \\
- \midrule
- a & a & a \\
- b & b & b \\
- \bottomrule
- \end{tabular}
- """
- )
- assert result == expected
+ # gh50871: in v2.0 escape is False by default (styler.format.escape=None)
+ default = df_with_symbols.to_latex()
+ specified_true = df_with_symbols.to_latex(escape=True)
+ assert default != specified_true
def test_to_latex_special_escape(self):
df = DataFrame([r"a\b\c", r"^a^b^c", r"~a~b~c"])
- result = df.to_latex()
+ result = df.to_latex(escape=True)
expected = _dedent(
r"""
\begin{tabular}{ll}
@@ -818,7 +808,7 @@ def test_to_latex_special_escape(self):
def test_to_latex_escape_special_chars(self):
special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"]
df = DataFrame(data=special_characters)
- result = df.to_latex()
+ result = df.to_latex(escape=True)
expected = _dedent(
r"""
\begin{tabular}{ll}
@@ -1039,7 +1029,7 @@ def test_to_latex_multindex_header(self):
# GH 16718
df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]})
df = df.set_index(["a", "b"])
- observed = df.to_latex(header=["r1", "r2"])
+ observed = df.to_latex(header=["r1", "r2"], multirow=False)
expected = _dedent(
r"""
\begin{tabular}{llrr}
@@ -1093,7 +1083,7 @@ def test_to_latex_multiindex_column_tabular(self):
def test_to_latex_multiindex_small_tabular(self):
df = DataFrame({("x", "y"): ["a"]}).T
- result = df.to_latex()
+ result = df.to_latex(multirow=False)
expected = _dedent(
r"""
\begin{tabular}{lll}
@@ -1108,7 +1098,7 @@ def test_to_latex_multiindex_small_tabular(self):
assert result == expected
def test_to_latex_multiindex_tabular(self, multiindex_frame):
- result = multiindex_frame.to_latex()
+ result = multiindex_frame.to_latex(multirow=False)
expected = _dedent(
r"""
\begin{tabular}{llrrrr}
@@ -1130,12 +1120,12 @@ def test_to_latex_multicolumn_tabular(self, multiindex_frame):
# GH 14184
df = multiindex_frame.T
df.columns.names = ["a", "b"]
- result = df.to_latex()
+ result = df.to_latex(multirow=False)
expected = _dedent(
r"""
\begin{tabular}{lrrrrr}
\toprule
- a & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
+ a & \multicolumn{2}{r}{c1} & \multicolumn{2}{r}{c2} & c3 \\
b & 0 & 1 & 0 & 1 & 0 \\
\midrule
0 & 0 & 4 & 0 & 4 & 0 \\
@@ -1151,7 +1141,7 @@ def test_to_latex_multicolumn_tabular(self, multiindex_frame):
def test_to_latex_index_has_name_tabular(self):
# GH 10660
df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
- result = df.set_index(["a", "b"]).to_latex()
+ result = df.set_index(["a", "b"]).to_latex(multirow=False)
expected = _dedent(
r"""
\begin{tabular}{llr}
@@ -1172,12 +1162,16 @@ def test_to_latex_index_has_name_tabular(self):
def test_to_latex_groupby_tabular(self):
# GH 10660
df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
- result = df.groupby("a").describe().to_latex(float_format="{:.1f}".format)
+ result = (
+ df.groupby("a")
+ .describe()
+ .to_latex(float_format="{:.1f}".format, escape=True)
+ )
expected = _dedent(
r"""
\begin{tabular}{lrrrrrrrr}
\toprule
- & \multicolumn{8}{l}{c} \\
+ & \multicolumn{8}{r}{c} \\
& count & mean & std & min & 25\% & 50\% & 75\% & max \\
a & & & & & & & & \\
\midrule
@@ -1200,7 +1194,7 @@ def test_to_latex_multiindex_dupe_level(self):
df = DataFrame(
index=pd.MultiIndex.from_tuples([("A", "c"), ("B", "c")]), columns=["col"]
)
- result = df.to_latex()
+ result = df.to_latex(multirow=False)
expected = _dedent(
r"""
\begin{tabular}{lll}
@@ -1221,7 +1215,7 @@ def test_to_latex_multicolumn_default(self, multicolumn_frame):
r"""
\begin{tabular}{lrrrrr}
\toprule
- & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\
+ & \multicolumn{2}{r}{c1} & \multicolumn{2}{r}{c2} & c3 \\
& 0 & 1 & 0 & 1 & 0 \\
\midrule
0 & 0 & 5 & 0 & 5 & 0 \\
@@ -1236,7 +1230,7 @@ def test_to_latex_multicolumn_default(self, multicolumn_frame):
assert result == expected
def test_to_latex_multicolumn_false(self, multicolumn_frame):
- result = multicolumn_frame.to_latex(multicolumn=False)
+ result = multicolumn_frame.to_latex(multicolumn=False, multicolumn_format="l")
expected = _dedent(
r"""
\begin{tabular}{lrrrrr}
@@ -1323,11 +1317,11 @@ def test_to_latex_multiindex_names(self, name0, name1, axes):
else ""
)
col_names = [n if (bool(n) and 1 in axes) else "" for n in names]
- observed = df.to_latex()
+ observed = df.to_latex(multirow=False)
# pylint: disable-next=consider-using-f-string
expected = r"""\begin{tabular}{llrrrr}
\toprule
- & %s & \multicolumn{2}{l}{1} & \multicolumn{2}{l}{2} \\
+ & %s & \multicolumn{2}{r}{1} & \multicolumn{2}{r}{2} \\
& %s & 3 & 4 & 3 & 4 \\
%s\midrule
1 & 3 & -1 & -1 & -1 & -1 \\
@@ -1347,7 +1341,7 @@ def test_to_latex_multiindex_nans(self, one_row):
df = DataFrame({"a": [None, 1], "b": [2, 3], "c": [4, 5]})
if one_row:
df = df.iloc[[0]]
- observed = df.set_index(["a", "b"]).to_latex()
+ observed = df.set_index(["a", "b"]).to_latex(multirow=False)
expected = _dedent(
r"""
\begin{tabular}{llr}
@@ -1369,7 +1363,7 @@ def test_to_latex_multiindex_nans(self, one_row):
def test_to_latex_non_string_index(self):
# GH 19981
df = DataFrame([[1, 2, 3]] * 2).set_index([0, 1])
- result = df.to_latex()
+ result = df.to_latex(multirow=False)
expected = _dedent(
r"""
\begin{tabular}{llr}
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 23de7106e08a4..04dcabd6b832c 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -219,7 +219,7 @@ def test_latex_repr(self):
\end{tabular}
"""
with option_context(
- "display.latex.escape", False, "styler.render.repr", "latex"
+ "styler.format.escape", None, "styler.render.repr", "latex"
):
s = Series([r"$\alpha$", "b", "c"])
assert result == s._repr_latex_()
| after #48970
| https://api.github.com/repos/pandas-dev/pandas/pulls/50871 | 2023-01-19T20:51:33Z | 2023-02-09T21:46:38Z | 2023-02-09T21:46:38Z | 2023-02-15T19:08:25Z |
BUG: to_datetime with Y or M unit not matching Timestamp | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 1894ce4ee12d9..d7d4538cbe4e0 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -951,6 +951,8 @@ Datetimelike
- Bug in :func:`Timestamp.utctimetuple` raising a ``TypeError`` (:issue:`32174`)
- Bug in :func:`to_datetime` was raising ``ValueError`` when parsing mixed-offset :class:`Timestamp` with ``errors='ignore'`` (:issue:`50585`)
- Bug in :func:`to_datetime` was incorrectly handling floating-point inputs within 1 ``unit`` of the overflow boundaries (:issue:`50183`)
+- Bug in :func:`to_datetime` with unit of "Y" or "M" giving incorrect results, not matching pointwise :class:`Timestamp` results (:issue:`50870`)
+-
Timedelta
^^^^^^^^^
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 5f7fb05876b35..3dcb56c76c4a7 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -220,19 +220,6 @@ def format_array_from_datetime(
return result
-cdef int64_t _wrapped_cast_from_unit(object val, str unit) except? -1:
- """
- Call cast_from_unit and re-raise OverflowError as OutOfBoundsDatetime
- """
- # See also timedeltas._maybe_cast_from_unit
- try:
- return cast_from_unit(val, unit)
- except OverflowError as err:
- raise OutOfBoundsDatetime(
- f"cannot convert input {val} with the unit '{unit}'"
- ) from err
-
-
def array_with_unit_to_datetime(
ndarray[object] values,
str unit,
@@ -302,7 +289,7 @@ def array_with_unit_to_datetime(
if val != val or val == NPY_NAT:
iresult[i] = NPY_NAT
else:
- iresult[i] = _wrapped_cast_from_unit(val, unit)
+ iresult[i] = cast_from_unit(val, unit)
elif isinstance(val, str):
if len(val) == 0 or val in nat_strings:
@@ -317,7 +304,7 @@ def array_with_unit_to_datetime(
f"non convertible value {val} with the unit '{unit}'"
)
- iresult[i] = _wrapped_cast_from_unit(fval, unit)
+ iresult[i] = cast_from_unit(fval, unit)
else:
# TODO: makes more sense as TypeError, but that would be an
@@ -362,7 +349,7 @@ cdef _array_with_unit_to_datetime_object_fallback(ndarray[object] values, str un
else:
try:
oresult[i] = Timestamp(val, unit=unit)
- except OverflowError:
+ except OutOfBoundsDatetime:
oresult[i] = val
elif isinstance(val, str):
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 30f60c392167b..5b636ff69a6a6 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -108,22 +108,41 @@ cdef int64_t cast_from_unit(object ts, str unit) except? -1:
if ts is None:
return m
- if unit in ["Y", "M"] and is_float_object(ts) and not ts.is_integer():
- # GH#47267 it is clear that 2 "M" corresponds to 1970-02-01,
- # but not clear what 2.5 "M" corresponds to, so we will
- # disallow that case.
- raise ValueError(
- f"Conversion of non-round float with unit={unit} "
- "is ambiguous"
- )
+ if unit in ["Y", "M"]:
+ if is_float_object(ts) and not ts.is_integer():
+ # GH#47267 it is clear that 2 "M" corresponds to 1970-02-01,
+ # but not clear what 2.5 "M" corresponds to, so we will
+ # disallow that case.
+ raise ValueError(
+ f"Conversion of non-round float with unit={unit} "
+ "is ambiguous"
+ )
+ # GH#47266 go through np.datetime64 to avoid weird results e.g. with "Y"
+ # and 150 we'd get 2120-01-01 09:00:00
+ if is_float_object(ts):
+ ts = int(ts)
+ dt64obj = np.datetime64(ts, unit)
+ return get_datetime64_nanos(dt64obj, NPY_FR_ns)
# cast the unit, multiply base/frace separately
# to avoid precision issues from float -> int
- base = <int64_t>ts
+ try:
+ base = <int64_t>ts
+ except OverflowError as err:
+ raise OutOfBoundsDatetime(
+ f"cannot convert input {ts} with the unit '{unit}'"
+ ) from err
+
frac = ts - base
if p:
frac = round(frac, p)
- return <int64_t>(base * m) + <int64_t>(frac * m)
+
+ try:
+ return <int64_t>(base * m) + <int64_t>(frac * m)
+ except OverflowError as err:
+ raise OutOfBoundsDatetime(
+ f"cannot convert input {ts} with the unit '{unit}'"
+ ) from err
cpdef inline (int64_t, int) precision_from_unit(str unit):
@@ -278,25 +297,13 @@ cdef _TSObject convert_to_tsobject(object ts, tzinfo tz, str unit,
if ts == NPY_NAT:
obj.value = NPY_NAT
else:
- if unit in ["Y", "M"]:
- # GH#47266 cast_from_unit leads to weird results e.g. with "Y"
- # and 150 we'd get 2120-01-01 09:00:00
- ts = np.datetime64(ts, unit)
- return convert_to_tsobject(ts, tz, None, False, False)
-
- ts = ts * cast_from_unit(None, unit)
+ ts = cast_from_unit(ts, unit)
obj.value = ts
pandas_datetime_to_datetimestruct(ts, NPY_FR_ns, &obj.dts)
elif is_float_object(ts):
if ts != ts or ts == NPY_NAT:
obj.value = NPY_NAT
else:
- if unit in ["Y", "M"]:
- if ts == int(ts):
- # GH#47266 Avoid cast_from_unit, which would give weird results
- # e.g. with "Y" and 150.0 we'd get 2120-01-01 09:00:00
- return convert_to_tsobject(int(ts), tz, unit, False, False)
-
ts = cast_from_unit(ts, unit)
obj.value = ts
pandas_datetime_to_datetimestruct(ts, NPY_FR_ns, &obj.dts)
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 8f9dd1fe02c19..feae4d1c28f83 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -373,7 +373,7 @@ cdef _maybe_cast_from_unit(ts, str unit):
# assert unit not in ["Y", "y", "M"]
try:
ts = cast_from_unit(ts, unit)
- except OverflowError as err:
+ except OutOfBoundsDatetime as err:
raise OutOfBoundsTimedelta(
f"Cannot cast {ts} from {unit} to 'ns' without overflow."
) from err
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index 4b7d2bc4b57d4..c6ceb2fcb0ebd 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -55,6 +55,15 @@ def test_constructor_from_date_second_reso(self):
ts = Timestamp(obj)
assert ts.unit == "s"
+ @pytest.mark.parametrize("typ", [int, float])
+ def test_construct_from_int_float_with_unit_out_of_bound_raises(self, typ):
+ # GH#50870 make sure we get a OutOfBoundsDatetime instead of OverflowError
+ val = typ(150000000)
+
+ msg = f"cannot convert input {val} with the unit 'D'"
+ with pytest.raises(OutOfBoundsDatetime, match=msg):
+ Timestamp(val, unit="D")
+
@pytest.mark.parametrize("typ", [int, float])
def test_constructor_int_float_with_YM_unit(self, typ):
# GH#47266 avoid the conversions in cast_from_unit
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index 3509c82d2af6d..dfbe78e53de40 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -1701,6 +1701,23 @@ def test_to_datetime_fixed_offset(self):
class TestToDatetimeUnit:
+ @pytest.mark.parametrize("unit", ["Y", "M"])
+ @pytest.mark.parametrize("item", [150, float(150)])
+ def test_to_datetime_month_or_year_unit_int(self, cache, unit, item):
+ # GH#50870 Note we have separate tests that pd.Timestamp gets these right
+ ts = Timestamp(item, unit=unit)
+ expected = DatetimeIndex([ts])
+
+ result = to_datetime([item], unit=unit, cache=cache)
+ tm.assert_index_equal(result, expected)
+
+ # TODO: this should also work
+ # result = to_datetime(np.array([item]), unit=unit, cache=cache)
+ # tm.assert_index_equal(result, expected)
+
+ result = to_datetime(np.array([item], dtype=object), unit=unit, cache=cache)
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize("unit", ["Y", "M"])
def test_to_datetime_month_or_year_unit_non_round_float(self, cache, unit):
# GH#50301
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50870 | 2023-01-19T20:08:00Z | 2023-01-23T19:25:13Z | 2023-01-23T19:25:13Z | 2023-01-23T19:28:31Z |
REF: simplify array_to_datetime | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index df6204d61f983..8a13e19738f0c 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -515,15 +515,9 @@ cpdef array_to_datetime(
if val != val or val == NPY_NAT:
iresult[i] = NPY_NAT
- elif is_raise or is_ignore:
- iresult[i] = val
else:
- # coerce
# we now need to parse this as if unit='ns'
- try:
- iresult[i] = cast_from_unit(val, "ns")
- except OverflowError:
- iresult[i] = NPY_NAT
+ iresult[i] = cast_from_unit(val, "ns")
elif isinstance(val, str):
# string
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50869 | 2023-01-19T17:51:17Z | 2023-01-20T18:36:21Z | 2023-01-20T18:36:21Z | 2023-01-20T18:41:07Z |
ENH: pyarrow temporal dtypes support quantile in some cases | diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index fcec562279248..4daea133ed14b 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -1204,7 +1204,23 @@ def _quantile(
-------
same type as self
"""
- result = pc.quantile(self._data, q=qs, interpolation=interpolation)
+ pa_dtype = self._data.type
+
+ data = self._data
+ if pa.types.is_temporal(pa_dtype) and interpolation in ["lower", "higher"]:
+ # https://github.com/apache/arrow/issues/33769 in these cases
+ # we can cast to ints and back
+ nbits = pa_dtype.bit_width
+ if nbits == 32:
+ data = data.cast(pa.int32())
+ else:
+ data = data.cast(pa.int64())
+
+ result = pc.quantile(data, q=qs, interpolation=interpolation)
+
+ if pa.types.is_temporal(pa_dtype) and interpolation in ["lower", "higher"]:
+ result = result.cast(pa_dtype)
+
return type(self)(result)
def _mode(self: ArrowExtensionArrayT, dropna: bool = True) -> ArrowExtensionArrayT:
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index f7517e1a3ab7f..310673f0aad98 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -1294,7 +1294,11 @@ def test_quantile(data, interpolation, quantile, request):
ser.quantile(q=quantile, interpolation=interpolation)
return
- if not (pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype)):
+ if pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype):
+ pass
+ elif pa.types.is_temporal(data._data.type) and interpolation in ["lower", "higher"]:
+ pass
+ else:
request.node.add_marker(
pytest.mark.xfail(
raises=pa.ArrowNotImplementedError,
@@ -1308,10 +1312,10 @@ def test_quantile(data, interpolation, quantile, request):
assert result == data[0]
else:
# Just check the values
- result = result.astype("float64[pyarrow]")
- expected = pd.Series(
- data.take([0, 0]).astype("float64[pyarrow]"), index=[0.5, 0.5]
- )
+ expected = pd.Series(data.take([0, 0]), index=[0.5, 0.5])
+ if pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype):
+ expected = expected.astype("float64[pyarrow]")
+ result = result.astype("float64[pyarrow]")
tm.assert_series_equal(result, expected)
| Cuts 35 seconds off the test runtime. | https://api.github.com/repos/pandas-dev/pandas/pulls/50868 | 2023-01-19T17:40:07Z | 2023-01-20T18:45:49Z | 2023-01-20T18:45:49Z | 2023-01-20T18:49:54Z |
BUG: Fix agg ingore arg/kwargs when given list like func | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index b10f201e79318..e305df7d525fa 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1304,6 +1304,7 @@ Groupby/resample/rolling
- Bug in :meth:`.DataFrameGroupBy.transform` and :meth:`.SeriesGroupBy.transform` would raise incorrectly when grouper had ``axis=1`` for ``"ngroup"`` argument (:issue:`45986`)
- Bug in :meth:`.DataFrameGroupBy.describe` produced incorrect results when data had duplicate columns (:issue:`50806`)
- Bug in :meth:`.DataFrameGroupBy.agg` with ``engine="numba"`` failing to respect ``as_index=False`` (:issue:`51228`)
+- Bug in :meth:`DataFrameGroupBy.agg`, :meth:`SeriesGroupBy.agg`, and :meth:`Resampler.agg` would ignore arguments when passed a list of functions (:issue:`50863`)
-
Reshaping
@@ -1317,6 +1318,7 @@ Reshaping
- Clarified error message in :func:`merge` when passing invalid ``validate`` option (:issue:`49417`)
- Bug in :meth:`DataFrame.explode` raising ``ValueError`` on multiple columns with ``NaN`` values or empty lists (:issue:`46084`)
- Bug in :meth:`DataFrame.transpose` with ``IntervalDtype`` column with ``timedelta64[ns]`` endpoints (:issue:`44917`)
+- Bug in :meth:`DataFrame.agg` and :meth:`Series.agg` would ignore arguments when passed a list of functions (:issue:`50863`)
-
Sparse
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index c28da1bc758cd..f29a6ce4c0b82 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -332,19 +332,28 @@ def agg_list_like(self) -> DataFrame | Series:
for a in arg:
colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
- new_res = colg.aggregate(a)
+ if isinstance(colg, (ABCSeries, ABCDataFrame)):
+ new_res = colg.aggregate(
+ a, self.axis, *self.args, **self.kwargs
+ )
+ else:
+ new_res = colg.aggregate(a, *self.args, **self.kwargs)
results.append(new_res)
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
- # multiples
else:
indices = []
for index, col in enumerate(selected_obj):
colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
- new_res = colg.aggregate(arg)
+ if isinstance(colg, (ABCSeries, ABCDataFrame)):
+ new_res = colg.aggregate(
+ arg, self.axis, *self.args, **self.kwargs
+ )
+ else:
+ new_res = colg.aggregate(arg, *self.args, **self.kwargs)
results.append(new_res)
indices.append(index)
keys = selected_obj.columns.take(indices)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 7745de87633eb..244afa61701d8 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -243,7 +243,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
- ret = self._aggregate_multiple_funcs(func)
+ ret = self._aggregate_multiple_funcs(func, *args, **kwargs)
if relabeling:
# columns is not narrowed by mypy from relabeling flag
assert columns is not None # for mypy
@@ -275,7 +275,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
agg = aggregate
- def _aggregate_multiple_funcs(self, arg) -> DataFrame:
+ def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame:
if isinstance(arg, dict):
if self.as_index:
# GH 15931
@@ -300,7 +300,7 @@ def _aggregate_multiple_funcs(self, arg) -> DataFrame:
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
- results[key] = self.aggregate(func)
+ results[key] = self.aggregate(func, *args, **kwargs)
if any(isinstance(x, DataFrame) for x in results.values()):
from pandas import concat
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index 7aaad4d2ad081..a0e667dc8f243 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -1623,3 +1623,25 @@ def test_any_apply_keyword_non_zero_axis_regression():
result = df.apply("any", 1)
tm.assert_series_equal(result, expected)
+
+
+def test_agg_list_like_func_with_args():
+ # GH 50624
+ df = DataFrame({"x": [1, 2, 3]})
+
+ def foo1(x, a=1, c=0):
+ return x + a + c
+
+ def foo2(x, b=2, c=0):
+ return x + b + c
+
+ msg = r"foo1\(\) got an unexpected keyword argument 'b'"
+ with pytest.raises(TypeError, match=msg):
+ df.agg([foo1, foo2], 0, 3, b=3, c=4)
+
+ result = df.agg([foo1, foo2], 0, 3, c=4)
+ expected = DataFrame(
+ [[8, 8], [9, 9], [10, 10]],
+ columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py
index 53dee6e15c3e0..30f040b4197eb 100644
--- a/pandas/tests/apply/test_series_apply.py
+++ b/pandas/tests/apply/test_series_apply.py
@@ -107,6 +107,26 @@ def f(x, a=0, b=0, c=0):
tm.assert_series_equal(result, expected)
+def test_agg_list_like_func_with_args():
+ # GH 50624
+
+ s = Series([1, 2, 3])
+
+ def foo1(x, a=1, c=0):
+ return x + a + c
+
+ def foo2(x, b=2, c=0):
+ return x + b + c
+
+ msg = r"foo1\(\) got an unexpected keyword argument 'b'"
+ with pytest.raises(TypeError, match=msg):
+ s.agg([foo1, foo2], 0, 3, b=3, c=4)
+
+ result = s.agg([foo1, foo2], 0, 3, c=4)
+ expected = DataFrame({"foo1": [8, 9, 10], "foo2": [8, 9, 10]})
+ tm.assert_frame_equal(result, expected)
+
+
def test_series_map_box_timestamps():
# GH#2689, GH#2627
ser = Series(pd.date_range("1/1/2000", periods=10))
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index e7be78be55620..22c9bbd74395d 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -1468,3 +1468,49 @@ def test_agg_of_mode_list(test, constant):
expected = expected.set_index(0)
tm.assert_frame_equal(result, expected)
+
+
+def test__dataframe_groupy_agg_list_like_func_with_args():
+ # GH 50624
+ df = DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]})
+ gb = df.groupby("y")
+
+ def foo1(x, a=1, c=0):
+ return x.sum() + a + c
+
+ def foo2(x, b=2, c=0):
+ return x.sum() + b + c
+
+ msg = r"foo1\(\) got an unexpected keyword argument 'b'"
+ with pytest.raises(TypeError, match=msg):
+ gb.agg([foo1, foo2], 3, b=3, c=4)
+
+ result = gb.agg([foo1, foo2], 3, c=4)
+ expected = DataFrame(
+ [[8, 8], [9, 9], [10, 10]],
+ index=Index(["a", "b", "c"], name="y"),
+ columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test__series_groupy_agg_list_like_func_with_args():
+ # GH 50624
+ s = Series([1, 2, 3])
+ sgb = s.groupby(s)
+
+ def foo1(x, a=1, c=0):
+ return x.sum() + a + c
+
+ def foo2(x, b=2, c=0):
+ return x.sum() + b + c
+
+ msg = r"foo1\(\) got an unexpected keyword argument 'b'"
+ with pytest.raises(TypeError, match=msg):
+ sgb.agg([foo1, foo2], 3, b=3, c=4)
+
+ result = sgb.agg([foo1, foo2], 3, c=4)
+ expected = DataFrame(
+ [[8, 8], [9, 9], [10, 10]], index=Index([1, 2, 3]), columns=["foo1", "foo2"]
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index e6e924793389d..0b8dc8f3e8ac4 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -633,6 +633,31 @@ def test_try_aggregate_non_existing_column():
df.resample("30T").agg({"x": ["mean"], "y": ["median"], "z": ["sum"]})
+def test_agg_list_like_func_with_args():
+ # 50624
+ df = DataFrame(
+ {"x": [1, 2, 3]}, index=date_range("2020-01-01", periods=3, freq="D")
+ )
+
+ def foo1(x, a=1, c=0):
+ return x + a + c
+
+ def foo2(x, b=2, c=0):
+ return x + b + c
+
+ msg = r"foo1\(\) got an unexpected keyword argument 'b'"
+ with pytest.raises(TypeError, match=msg):
+ df.resample("D").agg([foo1, foo2], 3, b=3, c=4)
+
+ result = df.resample("D").agg([foo1, foo2], 3, c=4)
+ expected = DataFrame(
+ [[8, 8], [9, 9], [10, 10]],
+ index=date_range("2020-01-01", periods=3, freq="D"),
+ columns=pd.MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
def test_selection_api_validation():
# GH 13500
index = date_range(datetime(2005, 1, 1), datetime(2005, 1, 10), freq="D")
| - [x] closes #50624 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Previous [Pull Request #50725](https://github.com/pandas-dev/pandas/pull/50725), for multiple elements-func list this time.
I would appreciate any feedback or suggestions on this draft pull request to help improve it. Thank you in advance for your help and cooperation. | https://api.github.com/repos/pandas-dev/pandas/pulls/50863 | 2023-01-19T13:25:40Z | 2023-02-12T21:31:31Z | 2023-02-12T21:31:31Z | 2023-02-13T03:40:53Z |
DOC: Add missing points to release instructions | diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst
index 6e9f622e18eea..31c3b8ee0652d 100644
--- a/doc/source/development/maintaining.rst
+++ b/doc/source/development/maintaining.rst
@@ -474,14 +474,21 @@ which will be triggered when the tag is pushed.
Post-Release
````````````
-1. Close the milestone and the issue for the released version.
+1. Update symlink to stable documentation by logging in to our web server, and
+ editing ``/var/www/html/pandas-docs/stable`` to point to ``version/<latest-version>``.
-2. Create a new issue for the next release, with the estimated date or release.
+2. If releasing a major or minor release, open a PR in our source code to update
+ ``web/pandas/versions.json``, to have the desired versions in the documentation
+ dropdown menu.
-3. Open a PR with the placeholder for the release notes of the next version. See
+3. Close the milestone and the issue for the released version.
+
+4. Create a new issue for the next release, with the estimated date of release.
+
+5. Open a PR with the placeholder for the release notes of the next version. See
for example [the PR for 1.5.3](https://github.com/pandas-dev/pandas/pull/49843/files).
-4. Announce the new release in the official channels (use previous announcements
+6. Announce the new release in the official channels (use previous announcements
for reference):
- The pandas-dev and pydata mailing lists
| The release instructions are missing the update of the symlink to the stable docs, as well as the update of the versions dropdown in the docs. Adding it here, as well as fixing a small typo. | https://api.github.com/repos/pandas-dev/pandas/pulls/50861 | 2023-01-19T08:51:06Z | 2023-01-19T17:46:32Z | 2023-01-19T17:46:32Z | 2023-01-19T17:46:43Z |
Backport PR #50855 on branch 1.5.x (RLS/DOC: Adding release notes for 1.5.4) | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index e2f3b45d47bef..5934bd3f61f6b 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -16,6 +16,7 @@ Version 1.5
.. toctree::
:maxdepth: 2
+ v1.5.4
v1.5.3
v1.5.2
v1.5.1
diff --git a/doc/source/whatsnew/v1.5.2.rst b/doc/source/whatsnew/v1.5.2.rst
index 6397016d827f2..efc8ca6afa342 100644
--- a/doc/source/whatsnew/v1.5.2.rst
+++ b/doc/source/whatsnew/v1.5.2.rst
@@ -43,4 +43,4 @@ Other
Contributors
~~~~~~~~~~~~
-.. contributors:: v1.5.1..v1.5.2|HEAD
+.. contributors:: v1.5.1..v1.5.2
diff --git a/doc/source/whatsnew/v1.5.3.rst b/doc/source/whatsnew/v1.5.3.rst
index 97c4c73f08c37..dae1e9b84cd67 100644
--- a/doc/source/whatsnew/v1.5.3.rst
+++ b/doc/source/whatsnew/v1.5.3.rst
@@ -56,4 +56,4 @@ Other
Contributors
~~~~~~~~~~~~
-.. contributors:: v1.5.2..v1.5.3|HEAD
+.. contributors:: v1.5.2..v1.5.3
diff --git a/doc/source/whatsnew/v1.5.4.rst b/doc/source/whatsnew/v1.5.4.rst
new file mode 100644
index 0000000000000..0d91424eb65ac
--- /dev/null
+++ b/doc/source/whatsnew/v1.5.4.rst
@@ -0,0 +1,38 @@
+.. _whatsnew_154:
+
+What's new in 1.5.4 (March XX, 2023)
+--------------------------------------
+
+These are the changes in pandas 1.5.4. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_154.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_154.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_154.other:
+
+Other
+~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_154.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.5.3..v1.5.4|HEAD
| Backport PR #50855: RLS/DOC: Adding release notes for 1.5.4 | https://api.github.com/repos/pandas-dev/pandas/pulls/50860 | 2023-01-19T08:50:26Z | 2023-01-19T10:50:12Z | 2023-01-19T10:50:12Z | 2023-01-19T10:50:13Z |
Add script to automatically download wheels | diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst
index 6e9f622e18eea..23eb5d7b18c7d 100644
--- a/doc/source/development/maintaining.rst
+++ b/doc/source/development/maintaining.rst
@@ -465,7 +465,8 @@ which will be triggered when the tag is pushed.
7. Download all wheels from the Anaconda repository where MacPython uploads them:
https://anaconda.org/multibuild-wheels-staging/pandas/files?version=<version>
- to the ``dist/`` directory in the local pandas copy.
+ to the ``dist/`` directory in the local pandas copy. You can use the script
+ ``scripts/download_wheels.sh`` to download all wheels at once.
8. Upload wheels to PyPI:
diff --git a/scripts/download_wheels.sh b/scripts/download_wheels.sh
new file mode 100755
index 0000000000000..0b92e83113f5f
--- /dev/null
+++ b/scripts/download_wheels.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+#
+# Download all wheels for a pandas version.
+#
+# This script is mostly useful during the release process, when wheels
+# generated by the MacPython repo need to be downloaded locally to then
+# be uploaded to the PyPI.
+#
+# There is no API to access the wheel files, so the script downloads the
+# website, extracts the file urls from the html, and then downloads it
+# one by one to the dist/ directory where they would be generated.
+
+VERSION=$1
+DIST_DIR="$(realpath $(dirname -- $0)/../dist)"
+
+if [ -z $VERSION ]; then
+ printf "Usage:\n\t$0 <version>\n\nWhere <version> is for example 1.5.3"
+ exit 1
+fi
+
+curl "https://anaconda.org/multibuild-wheels-staging/pandas/files?version=${VERSION}" | \
+ grep "href=\"/multibuild-wheels-staging/pandas/${VERSION}" | \
+ sed -r 's/.*<a href="([^"]+\.whl)">.*/\1/g' | \
+ awk '{print "https://anaconda.org" $0 }' | \
+ xargs wget -P $DIST_DIR
+
+printf "\nWheels downloaded to $DIST_DIR\nYou can upload them to PyPI using:\n\n"
+printf "\ttwine upload ${DIST_DIR}/pandas-${VERSION}*.{whl,tar.gz} --skip-existing"
| There is a similar script in the pandas-release repo, but that repo is not needed anyomore for the release, and it makes more sense to have it here. Also, the original script is in Python, which makes it much more complex. Here I use a shell script with a single command. | https://api.github.com/repos/pandas-dev/pandas/pulls/50859 | 2023-01-19T08:02:53Z | 2023-01-26T19:02:21Z | 2023-01-26T19:02:21Z | 2023-01-26T19:02:22Z |
STYLE: remove absolufy imports, use ruff | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index dfef9844ce6cb..588d0fee686f3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -19,11 +19,6 @@ repos:
rev: v0.0.215
hooks:
- id: ruff
-- repo: https://github.com/MarcoGorelli/absolufy-imports
- rev: v0.3.1
- hooks:
- - id: absolufy-imports
- files: ^pandas/
- repo: https://github.com/jendrikseipp/vulture
rev: 'v2.6'
hooks:
diff --git a/pyproject.toml b/pyproject.toml
index dc237d32c022c..fcd2d79adf534 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -203,6 +203,8 @@ select = [
"Q",
# pylint
"PLE", "PLR", "PLW",
+ # tidy imports
+ "TID",
]
ignore = [
@@ -254,6 +256,9 @@ exclude = [
"env",
]
+[tool.ruff.per-file-ignores]
+"asv_bench/*" = ["RUF", "E402"]
+
[tool.pylint.messages_control]
max-line-length = 88
disable = [
| - [ ] closes #50812
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50858 | 2023-01-19T05:59:31Z | 2023-02-13T18:56:44Z | null | 2023-02-13T18:56:45Z |
BUG/PERF: Series(category).replace | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index dc05745c8c0e5..873c7aabde785 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -883,6 +883,7 @@ Performance improvements
- Performance improvement in :func:`merge` and :meth:`DataFrame.join` when joining on a sorted :class:`MultiIndex` (:issue:`48504`)
- Performance improvement in :func:`to_datetime` when parsing strings with timezone offsets (:issue:`50107`)
- Performance improvement in :meth:`DataFrame.loc` and :meth:`Series.loc` for tuple-based indexing of a :class:`MultiIndex` (:issue:`48384`)
+- Performance improvement for :meth:`Series.replace` with categorical dtype (:issue:`49404`)
- Performance improvement for :meth:`MultiIndex.unique` (:issue:`48335`)
- Performance improvement for :func:`concat` with extension array backed indexes (:issue:`49128`, :issue:`49178`)
- Reduce memory usage of :meth:`DataFrame.to_pickle`/:meth:`Series.to_pickle` when using BZ2 or LZMA (:issue:`49068`)
@@ -927,6 +928,8 @@ Bug fixes
Categorical
^^^^^^^^^^^
- Bug in :meth:`Categorical.set_categories` losing dtype information (:issue:`48812`)
+- Bug in :meth:`Series.replace` with categorical dtype when ``to_replace`` values overlap with new values (:issue:`49404`)
+- Bug in :meth:`Series.replace` with categorical dtype losing nullable dtypes of underlying categories (:issue:`49404`)
- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` would reorder categories when used as a grouper (:issue:`48749`)
- Bug in :class:`Categorical` constructor when constructing from a :class:`Categorical` object and ``dtype="category"`` losing ordered-ness (:issue:`49309`)
-
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 14f334d72dbb1..5b61695410474 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1137,14 +1137,9 @@ def remove_categories(self, removals):
if not is_list_like(removals):
removals = [removals]
- removal_set = set(removals)
- not_included = removal_set - set(self.dtype.categories)
- new_categories = [c for c in self.dtype.categories if c not in removal_set]
-
- # GH 10156
- if any(isna(removals)):
- not_included = {x for x in not_included if notna(x)}
- new_categories = [x for x in new_categories if notna(x)]
+ removals = {x for x in set(removals) if notna(x)}
+ new_categories = self.dtype.categories.difference(removals)
+ not_included = removals.difference(self.dtype.categories)
if len(not_included) != 0:
raise ValueError(f"removals must all be in old categories: {not_included}")
@@ -2273,42 +2268,28 @@ def isin(self, values) -> npt.NDArray[np.bool_]:
return algorithms.isin(self.codes, code_values)
def _replace(self, *, to_replace, value, inplace: bool = False):
+ from pandas import Index
+
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
- # other cases, like if both to_replace and value are list-like or if
- # to_replace is a dict, are handled separately in NDFrame
- if not is_list_like(to_replace):
- to_replace = [to_replace]
-
- categories = cat.categories.tolist()
- removals = set()
- for replace_value in to_replace:
- if value == replace_value:
- continue
- if replace_value not in cat.categories:
- continue
- if isna(value):
- removals.add(replace_value)
- continue
-
- index = categories.index(replace_value)
-
- if value in cat.categories:
- value_index = categories.index(value)
- cat._codes[cat._codes == index] = value_index
- removals.add(replace_value)
- else:
- categories[index] = value
- cat._set_categories(categories)
+ mask = isna(np.asarray(value))
+ if mask.any():
+ removals = np.asarray(to_replace)[mask]
+ removals = cat.categories[cat.categories.isin(removals)]
+ new_cat = cat.remove_categories(removals)
+ NDArrayBacked.__init__(cat, new_cat.codes, new_cat.dtype)
- if len(removals):
- new_categories = [c for c in categories if c not in removals]
- new_dtype = CategoricalDtype(new_categories, ordered=self.dtype.ordered)
- codes = recode_for_categories(
- cat.codes, cat.categories, new_dtype.categories
- )
- NDArrayBacked.__init__(cat, codes, new_dtype)
+ ser = cat.categories.to_series()
+ ser = ser.replace(to_replace=to_replace, value=value)
+
+ all_values = Index(ser)
+ new_categories = Index(ser.drop_duplicates(keep="first"))
+ new_codes = recode_for_categories(
+ cat._codes, all_values, new_categories, copy=False
+ )
+ new_dtype = CategoricalDtype(new_categories, ordered=self.dtype.ordered)
+ NDArrayBacked.__init__(cat, new_codes, new_dtype)
if not inplace:
return cat
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 00ab9d02cee00..8fb6a18ca137a 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -536,12 +536,10 @@ def replace(
if isinstance(values, Categorical):
# TODO: avoid special-casing
+ # GH49404
blk = self if inplace else self.copy()
- # error: Item "ExtensionArray" of "Union[ndarray[Any, Any],
- # ExtensionArray]" has no attribute "_replace"
- blk.values._replace( # type: ignore[union-attr]
- to_replace=to_replace, value=value, inplace=True
- )
+ values = cast(Categorical, blk.values)
+ values._replace(to_replace=to_replace, value=value, inplace=True)
return [blk]
if not self._can_hold_element(to_replace):
@@ -651,6 +649,14 @@ def replace_list(
"""
values = self.values
+ if isinstance(values, Categorical):
+ # TODO: avoid special-casing
+ # GH49404
+ blk = self if inplace else self.copy()
+ values = cast(Categorical, blk.values)
+ values._replace(to_replace=src_list, value=dest_list, inplace=True)
+ return [blk]
+
# Exclude anything that we know we won't contain
pairs = [
(x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x)
diff --git a/pandas/tests/arrays/categorical/test_replace.py b/pandas/tests/arrays/categorical/test_replace.py
index a3ba420c84a17..c25f1d9c9feac 100644
--- a/pandas/tests/arrays/categorical/test_replace.py
+++ b/pandas/tests/arrays/categorical/test_replace.py
@@ -21,6 +21,11 @@
((5, 6), 2, [1, 2, 3], False),
([1], [2], [2, 2, 3], False),
([1, 4], [5, 2], [5, 2, 3], False),
+ # GH49404: overlap between to_replace and value
+ ([1, 2, 3], [2, 3, 4], [2, 3, 4], False),
+ # GH50872, GH46884: replace with null
+ (1, None, [None, 2, 3], False),
+ (1, pd.NA, [None, 2, 3], False),
# check_categorical sorts categories, which crashes on mixed dtypes
(3, "4", [1, 2, "4"], False),
([1, 2, "3"], "5", ["5", "5", 3], True),
@@ -65,3 +70,11 @@ def test_replace_categorical(to_replace, value, result, expected_error_msg):
pd.Series(cat).replace(to_replace, value, inplace=True)
tm.assert_categorical_equal(cat, expected)
+
+
+def test_replace_categorical_ea_dtype():
+ # GH49404
+ cat = Categorical(pd.array(["a", "b"], dtype="string"))
+ result = pd.Series(cat).replace(["a", "b"], ["c", pd.NA])._values
+ expected = Categorical(pd.array(["c", pd.NA], dtype="string"))
+ tm.assert_categorical_equal(result, expected)
| - [x] closes #50872
- [x] closes #46884
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v2.0.0.rst` file if fixing a bug or adding a new feature.
Redo of #49404 with fix that was reverted in #50848 | https://api.github.com/repos/pandas-dev/pandas/pulls/50857 | 2023-01-19T04:52:44Z | 2023-01-24T19:02:33Z | 2023-01-24T19:02:33Z | 2023-02-23T01:38:38Z |
RLS/DOC: Adding release notes for 1.5.4 | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 821f77dbba3e2..69f845096ea24 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -24,6 +24,7 @@ Version 1.5
.. toctree::
:maxdepth: 2
+ v1.5.4
v1.5.3
v1.5.2
v1.5.1
diff --git a/doc/source/whatsnew/v1.5.2.rst b/doc/source/whatsnew/v1.5.2.rst
index 6397016d827f2..efc8ca6afa342 100644
--- a/doc/source/whatsnew/v1.5.2.rst
+++ b/doc/source/whatsnew/v1.5.2.rst
@@ -43,4 +43,4 @@ Other
Contributors
~~~~~~~~~~~~
-.. contributors:: v1.5.1..v1.5.2|HEAD
+.. contributors:: v1.5.1..v1.5.2
diff --git a/doc/source/whatsnew/v1.5.3.rst b/doc/source/whatsnew/v1.5.3.rst
index 67c2347fe53ac..5358d45048af8 100644
--- a/doc/source/whatsnew/v1.5.3.rst
+++ b/doc/source/whatsnew/v1.5.3.rst
@@ -55,4 +55,4 @@ Other
Contributors
~~~~~~~~~~~~
-.. contributors:: v1.5.2..v1.5.3|HEAD
+.. contributors:: v1.5.2..v1.5.3
diff --git a/doc/source/whatsnew/v1.5.4.rst b/doc/source/whatsnew/v1.5.4.rst
new file mode 100644
index 0000000000000..0d91424eb65ac
--- /dev/null
+++ b/doc/source/whatsnew/v1.5.4.rst
@@ -0,0 +1,38 @@
+.. _whatsnew_154:
+
+What's new in 1.5.4 (March XX, 2023)
+--------------------------------------
+
+These are the changes in pandas 1.5.4. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_154.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_154.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_154.other:
+
+Other
+~~~~~
+-
+
+.. ---------------------------------------------------------------------------
+.. _whatsnew_154.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.5.3..v1.5.4|HEAD
| Adding release notes for 1.5.4. | https://api.github.com/repos/pandas-dev/pandas/pulls/50855 | 2023-01-19T04:08:53Z | 2023-01-19T08:49:51Z | 2023-01-19T08:49:51Z | 2023-01-19T08:49:58Z |
DOC: Fix typo in sed command in the release instructions | diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst
index 6e9f622e18eea..e55ab0d124c45 100644
--- a/doc/source/development/maintaining.rst
+++ b/doc/source/development/maintaining.rst
@@ -458,8 +458,8 @@ which will be triggered when the tag is pushed.
git checkout master
git pull --ff-only upstream master
git checkout -B RLS-<version>
- sed -i 's/BUILD_COMMIT: "v.*/BUILD_COMMIT: "'<version>'"/' azure/windows.yml azure/posix.yml
- sed -i 's/BUILD_COMMIT="v.*/BUILD_COMMIT="'<version>'"/' .travis.yml
+ sed -i 's/BUILD_COMMIT: "v.*/BUILD_COMMIT: "'v<version>'"/' azure/windows.yml azure/posix.yml
+ sed -i 's/BUILD_COMMIT="v.*/BUILD_COMMIT="'v<version>'"/' .travis.yml
git commit -am "RLS <version>"
git push -u origin RLS-<version>
| The `BUILD_COMMIT` in the MacPython CI is the tag name (e.g. `v1.5.3`) and not the version name (e.g. `1.5.3`). The release instructions have it wrong, fixing it here. | https://api.github.com/repos/pandas-dev/pandas/pulls/50854 | 2023-01-19T04:01:42Z | 2023-01-19T17:45:34Z | 2023-01-19T17:45:33Z | 2023-01-19T17:45:41Z |
REF: Use * syntax to make reindex kwargs keyword only | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index dc05745c8c0e5..7e8b640a4e0f3 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -736,7 +736,7 @@ Removal of prior version deprecations/changes
- Disallow passing non-keyword arguments to :meth:`DataFrame.replace`, :meth:`Series.replace` except for ``to_replace`` and ``value`` (:issue:`47587`)
- Disallow passing non-keyword arguments to :meth:`DataFrame.sort_values` except for ``by`` (:issue:`41505`)
- Disallow passing non-keyword arguments to :meth:`Series.sort_values` (:issue:`41505`)
-- Disallow passing 2 non-keyword arguments to :meth:`DataFrame.reindex` (:issue:`17966`)
+- Disallow passing non-keyword arguments to :meth:`DataFrame.reindex` except for ``labels`` (:issue:`17966`)
- Disallow :meth:`Index.reindex` with non-unique :class:`Index` objects (:issue:`42568`)
- Disallowed constructing :class:`Categorical` with scalar ``data`` (:issue:`38433`)
- Disallowed constructing :class:`CategoricalIndex` without passing ``data`` (:issue:`38944`)
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 2e9638036eec5..92624ca54e5db 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -149,8 +149,6 @@ def pytest_collection_modifyitems(items, config) -> None:
ignored_doctest_warnings = [
# Docstring divides by zero to show behavior difference
("missing.mask_zero_div_zero", "divide by zero encountered"),
- # Docstring demonstrates the call raises a warning
- ("_validators.validate_axis_style_args", "Use named arguments"),
]
for item in items:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9efc07628cccd..f8fdcbdfd34d4 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -101,12 +101,10 @@
Appender,
Substitution,
doc,
- rewrite_axis_style_signature,
)
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import (
validate_ascending,
- validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
@@ -260,11 +258,18 @@
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.""",
- "optional_labels": """labels : array-like, optional
- New labels / index to conform the axis specified by 'axis' to.""",
- "optional_axis": """axis : int or str, optional
- Axis to target. Can be either the axis name ('index', 'columns')
- or number (0, 1).""",
+ "optional_reindex": """
+labels : array-like, optional
+ New labels / index to conform the axis specified by 'axis' to.
+index : array-like, optional
+ New labels for the index. Preferably an Index object to avoid
+ duplicating data.
+columns : array-like, optional
+ New labels for the columns. Preferably an Index object to avoid
+ duplicating data.
+axis : int or str, optional
+ Axis to target. Can be either the axis name ('index', 'columns')
+ or number (0, 1).""",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
@@ -4990,26 +4995,37 @@ def set_axis(
) -> DataFrame:
return super().set_axis(labels, axis=axis, copy=copy)
- @Substitution(**_shared_doc_kwargs)
- @Appender(NDFrame.reindex.__doc__)
- @rewrite_axis_style_signature(
- "labels",
- [
- ("method", None),
- ("copy", None),
- ("level", None),
- ("fill_value", np.nan),
- ("limit", None),
- ("tolerance", None),
- ],
+ @doc(
+ NDFrame.reindex,
+ klass=_shared_doc_kwargs["klass"],
+ optional_reindex=_shared_doc_kwargs["optional_reindex"],
)
- def reindex(self, *args, **kwargs) -> DataFrame:
- axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
- kwargs.update(axes)
- # Pop these, since the values are in `kwargs` under different names
- kwargs.pop("axis", None)
- kwargs.pop("labels", None)
- return super().reindex(**kwargs)
+ def reindex( # type: ignore[override]
+ self,
+ labels=None,
+ *,
+ index=None,
+ columns=None,
+ axis: Axis | None = None,
+ method: str | None = None,
+ copy: bool | None = None,
+ level: Level | None = None,
+ fill_value: Scalar | None = np.nan,
+ limit: int | None = None,
+ tolerance=None,
+ ) -> DataFrame:
+ return super().reindex(
+ labels=labels,
+ index=index,
+ columns=columns,
+ axis=axis,
+ method=method,
+ copy=copy,
+ level=level,
+ fill_value=fill_value,
+ limit=limit,
+ tolerance=tolerance,
+ )
@overload
def drop(
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 998c57b66509d..c06f6d7c85673 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -69,6 +69,7 @@
NDFrameT,
RandomState,
Renamer,
+ Scalar,
SortKind,
StorageOptions,
Suffixes,
@@ -5104,11 +5105,21 @@ def sort_index(
@doc(
klass=_shared_doc_kwargs["klass"],
- axes=_shared_doc_kwargs["axes"],
- optional_labels="",
- optional_axis="",
+ optional_reindex="",
)
- def reindex(self: NDFrameT, *args, **kwargs) -> NDFrameT:
+ def reindex(
+ self: NDFrameT,
+ labels=None,
+ index=None,
+ columns=None,
+ axis: Axis | None = None,
+ method: str | None = None,
+ copy: bool_t | None = None,
+ level: Level | None = None,
+ fill_value: Scalar | None = np.nan,
+ limit: int | None = None,
+ tolerance=None,
+ ) -> NDFrameT:
"""
Conform {klass} to new index with optional filling logic.
@@ -5118,11 +5129,7 @@ def reindex(self: NDFrameT, *args, **kwargs) -> NDFrameT:
Parameters
----------
- {optional_labels}
- {axes} : array-like, optional
- New labels / index to conform to, should be specified using
- keywords. Preferably an Index object to avoid duplicating data.
- {optional_axis}
+ {optional_reindex}
method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
@@ -5311,31 +5318,34 @@ def reindex(self: NDFrameT, *args, **kwargs) -> NDFrameT:
# TODO: Decide if we care about having different examples for different
# kinds
- # construct the args
- axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
- method = clean_reindex_fill_method(kwargs.pop("method", None))
- level = kwargs.pop("level", None)
- copy = kwargs.pop("copy", None)
- limit = kwargs.pop("limit", None)
- tolerance = kwargs.pop("tolerance", None)
- fill_value = kwargs.pop("fill_value", None)
-
- # Series.reindex doesn't use / need the axis kwarg
- # We pop and ignore it here, to make writing Series/Frame generic code
- # easier
- kwargs.pop("axis", None)
-
- if kwargs:
- raise TypeError(
- "reindex() got an unexpected keyword "
- f'argument "{list(kwargs.keys())[0]}"'
- )
+ if index is not None and columns is not None and labels is not None:
+ raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.")
+ elif index is not None or columns is not None:
+ if axis is not None:
+ raise TypeError(
+ "Cannot specify both 'axis' and any of 'index' or 'columns'"
+ )
+ if labels is not None:
+ if index is not None:
+ columns = labels
+ else:
+ index = labels
+ else:
+ if axis and self._get_axis_number(axis) == 1:
+ columns = labels
+ else:
+ index = labels
+ axes: dict[Literal["index", "columns"], Any] = {
+ "index": index,
+ "columns": columns,
+ }
+ method = clean_reindex_fill_method(method)
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(
- self._get_axis(axis).identical(ax)
- for axis, ax in axes.items()
+ self._get_axis(axis_name).identical(ax)
+ for axis_name, ax in axes.items()
if ax is not None
):
return self.copy(deep=copy)
@@ -5517,7 +5527,7 @@ def filter(
name = self._get_axis_name(axis)
# error: Keywords must be strings
return self.reindex( # type: ignore[misc]
- **{name: [r for r in items if r in labels]}
+ **{name: [r for r in items if r in labels]} # type: ignore[arg-type]
)
elif like:
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index c15948ce877a8..c9bfa18e48a60 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -4098,7 +4098,7 @@ def _reindex_output(
"copy": False,
"fill_value": fill_value,
}
- return output.reindex(**d)
+ return output.reindex(**d) # type: ignore[arg-type]
# GH 13204
# Here, the categorical in-axis groupers, which need to be fully
diff --git a/pandas/core/series.py b/pandas/core/series.py
index c6ba217042353..3d5b44ba52594 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -58,6 +58,7 @@
NaPosition,
QuantileInterpolation,
Renamer,
+ Scalar,
SingleManager,
SortKind,
StorageOptions,
@@ -190,8 +191,12 @@
"duplicated": "Series",
"optional_by": "",
"optional_mapper": "",
- "optional_labels": "",
- "optional_axis": "",
+ "optional_reindex": """
+index : array-like, optional
+ New labels for the index. Preferably an Index object to avoid
+ duplicating data.
+axis : int or str, optional
+ Unused.""",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
@@ -4862,21 +4867,29 @@ def set_axis(
@doc(
NDFrame.reindex, # type: ignore[has-type]
klass=_shared_doc_kwargs["klass"],
- axes=_shared_doc_kwargs["axes"],
- optional_labels=_shared_doc_kwargs["optional_labels"],
- optional_axis=_shared_doc_kwargs["optional_axis"],
+ optional_reindex=_shared_doc_kwargs["optional_reindex"],
)
- def reindex(self, *args, **kwargs) -> Series:
- if len(args) > 1:
- raise TypeError("Only one positional argument ('index') is allowed")
- if args:
- (index,) = args
- if "index" in kwargs:
- raise TypeError(
- "'index' passed as both positional and keyword argument"
- )
- kwargs.update({"index": index})
- return super().reindex(**kwargs)
+ def reindex( # type: ignore[override]
+ self,
+ index=None,
+ *,
+ axis: Axis | None = None,
+ method: str | None = None,
+ copy: bool | None = None,
+ level: Level | None = None,
+ fill_value: Scalar | None = None,
+ limit: int | None = None,
+ tolerance=None,
+ ) -> Series:
+ return super().reindex(
+ index=index,
+ method=method,
+ copy=copy,
+ level=level,
+ fill_value=fill_value,
+ limit=limit,
+ tolerance=tolerance,
+ )
@overload
def drop(
diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py
index a627d0fbb4c7a..f455213bd436b 100644
--- a/pandas/tests/frame/methods/test_reindex.py
+++ b/pandas/tests/frame/methods/test_reindex.py
@@ -841,17 +841,18 @@ def test_reindex_positional_raises(self):
# https://github.com/pandas-dev/pandas/issues/12392
# Enforced in 2.0
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
- with pytest.raises(TypeError, match=r".* is ambiguous."):
+ msg = r"reindex\(\) takes from 1 to 2 positional arguments but 3 were given"
+ with pytest.raises(TypeError, match=msg):
df.reindex([0, 1], ["A", "B", "C"])
def test_reindex_axis_style_raises(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
- df.reindex([0, 1], ["A"], axis=1)
+ df.reindex([0, 1], columns=["A"], axis=1)
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
- df.reindex([0, 1], ["A"], axis="index")
+ df.reindex([0, 1], columns=["A"], axis="index")
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis="index")
@@ -866,7 +867,7 @@ def test_reindex_axis_style_raises(self):
df.reindex(index=[0, 1], columns=[0, 1], axis="columns")
with pytest.raises(TypeError, match="Cannot specify all"):
- df.reindex([0, 1], [0], ["A"])
+ df.reindex(labels=[0, 1], index=[0], columns=["A"])
# Mixing styles
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
diff --git a/pandas/tests/series/methods/test_reindex.py b/pandas/tests/series/methods/test_reindex.py
index b00858d2779bc..2c427399c9cd5 100644
--- a/pandas/tests/series/methods/test_reindex.py
+++ b/pandas/tests/series/methods/test_reindex.py
@@ -369,16 +369,15 @@ def test_reindex_periodindex_with_object(p_values, o_values, values, expected_va
def test_reindex_too_many_args():
# GH 40980
ser = Series([1, 2])
- with pytest.raises(
- TypeError, match=r"Only one positional argument \('index'\) is allowed"
- ):
+ msg = r"reindex\(\) takes from 1 to 2 positional arguments but 3 were given"
+ with pytest.raises(TypeError, match=msg):
ser.reindex([2, 3], False)
def test_reindex_double_index():
# GH 40980
ser = Series([1, 2])
- msg = r"'index' passed as both positional and keyword argument"
+ msg = r"reindex\(\) got multiple values for argument 'index'"
with pytest.raises(TypeError, match=msg):
ser.reindex([2, 3], index=[3, 4])
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index 943d0ef3c1332..b60169f8364da 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -5,7 +5,6 @@
from __future__ import annotations
from typing import (
- Any,
Iterable,
Sequence,
TypeVar,
@@ -262,94 +261,6 @@ def validate_bool_kwarg(
return value
-def validate_axis_style_args(
- data, args, kwargs, arg_name, method_name
-) -> dict[str, Any]:
- """
- Argument handler for mixed index, columns / axis functions
-
- In an attempt to handle both `.method(index, columns)`, and
- `.method(arg, axis=.)`, we have to do some bad things to argument
- parsing. This translates all arguments to `{index=., columns=.}` style.
-
- Parameters
- ----------
- data : DataFrame
- args : tuple
- All positional arguments from the user
- kwargs : dict
- All keyword arguments from the user
- arg_name, method_name : str
- Used for better error messages
-
- Returns
- -------
- kwargs : dict
- A dictionary of keyword arguments. Doesn't modify ``kwargs``
- inplace, so update them with the return value here.
-
- Examples
- --------
- >>> df = pd.DataFrame(range(2))
- >>> validate_axis_style_args(df, (str.upper,), {'columns': id},
- ... 'mapper', 'rename')
- {'columns': <built-in function id>, 'index': <method 'upper' of 'str' objects>}
- """
- # TODO: Change to keyword-only args and remove all this
-
- out = {}
- # Goal: fill 'out' with index/columns-style arguments
- # like out = {'index': foo, 'columns': bar}
-
- # Start by validating for consistency
- if "axis" in kwargs and any(x in kwargs for x in data._AXIS_TO_AXIS_NUMBER):
- msg = "Cannot specify both 'axis' and any of 'index' or 'columns'."
- raise TypeError(msg)
-
- # First fill with explicit values provided by the user...
- if arg_name in kwargs:
- if args:
- msg = f"{method_name} got multiple values for argument '{arg_name}'"
- raise TypeError(msg)
-
- axis = data._get_axis_name(kwargs.get("axis", 0))
- out[axis] = kwargs[arg_name]
-
- # More user-provided arguments, now from kwargs
- for k, v in kwargs.items():
- try:
- ax = data._get_axis_name(k)
- except ValueError:
- pass
- else:
- out[ax] = v
-
- # All user-provided kwargs have been handled now.
- # Now we supplement with positional arguments, emitting warnings
- # when there's ambiguity and raising when there's conflicts
-
- if len(args) == 0:
- pass # It's up to the function to decide if this is valid
- elif len(args) == 1:
- axis = data._get_axis_name(kwargs.get("axis", 0))
- out[axis] = args[0]
- elif len(args) == 2:
- if "axis" in kwargs:
- # Unambiguously wrong
- msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
- raise TypeError(msg)
-
- msg = (
- f"'.{method_name}(a, b)' is ambiguous. Use named keyword arguments"
- "for 'index' or 'columns'."
- )
- raise TypeError(msg)
- else:
- msg = f"Cannot specify all of '{arg_name}', 'index', 'columns'."
- raise TypeError(msg)
- return out
-
-
def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool = True):
"""
Validate the keyword arguments to 'fillna'.
| After enforcing #17966, this makes `DataFrame.reindex` effectively keyword only except for `labels`, so removed ad-hoc logic to enforce that. `Series.reindex` also has similar ad-hoc logic which can be removed.
Also did some docstring cleanup for both functions. | https://api.github.com/repos/pandas-dev/pandas/pulls/50853 | 2023-01-19T02:18:10Z | 2023-01-30T20:07:45Z | 2023-01-30T20:07:45Z | 2023-01-30T20:07:50Z |
REF: consolidate cast_from_unit checks | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index df6204d61f983..5f7fb05876b35 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -220,6 +220,19 @@ def format_array_from_datetime(
return result
+cdef int64_t _wrapped_cast_from_unit(object val, str unit) except? -1:
+ """
+ Call cast_from_unit and re-raise OverflowError as OutOfBoundsDatetime
+ """
+ # See also timedeltas._maybe_cast_from_unit
+ try:
+ return cast_from_unit(val, unit)
+ except OverflowError as err:
+ raise OutOfBoundsDatetime(
+ f"cannot convert input {val} with the unit '{unit}'"
+ ) from err
+
+
def array_with_unit_to_datetime(
ndarray[object] values,
str unit,
@@ -261,13 +274,10 @@ def array_with_unit_to_datetime(
bint is_raise = errors=="raise"
ndarray[int64_t] iresult
object tz = None
- bint is_ym
float fval
assert is_ignore or is_coerce or is_raise
- is_ym = unit in "YM"
-
if unit == "ns":
result, tz = array_to_datetime(
values.astype(object, copy=False),
@@ -292,19 +302,7 @@ def array_with_unit_to_datetime(
if val != val or val == NPY_NAT:
iresult[i] = NPY_NAT
else:
- if is_ym and is_float_object(val) and not val.is_integer():
- # Analogous to GH#47266 for Timestamp
- raise ValueError(
- f"Conversion of non-round float with unit={unit} "
- "is ambiguous"
- )
-
- try:
- iresult[i] = cast_from_unit(val, unit)
- except OverflowError:
- raise OutOfBoundsDatetime(
- f"cannot convert input {val} with the unit '{unit}'"
- )
+ iresult[i] = _wrapped_cast_from_unit(val, unit)
elif isinstance(val, str):
if len(val) == 0 or val in nat_strings:
@@ -319,23 +317,7 @@ def array_with_unit_to_datetime(
f"non convertible value {val} with the unit '{unit}'"
)
- if is_ym and not fval.is_integer():
- # Analogous to GH#47266 for Timestamp
- raise ValueError(
- f"Conversion of non-round float with unit={unit} "
- "is ambiguous"
- )
-
- try:
- iresult[i] = cast_from_unit(fval, unit)
- except ValueError:
- raise ValueError(
- f"non convertible value {val} with the unit '{unit}'"
- )
- except OverflowError:
- raise OutOfBoundsDatetime(
- f"cannot convert input {val} with the unit '{unit}'"
- )
+ iresult[i] = _wrapped_cast_from_unit(fval, unit)
else:
# TODO: makes more sense as TypeError, but that would be an
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 8cdad5c79bc1b..d862b5bb606cc 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -108,6 +108,15 @@ cdef int64_t cast_from_unit(object ts, str unit) except? -1:
if ts is None:
return m
+ if unit in ["Y", "M"] and is_float_object(ts) and not ts.is_integer():
+ # GH#47267 it is clear that 2 "M" corresponds to 1970-02-01,
+ # but not clear what 2.5 "M" corresponds to, so we will
+ # disallow that case.
+ raise ValueError(
+ f"Conversion of non-round float with unit={unit} "
+ "is ambiguous"
+ )
+
# cast the unit, multiply base/frace separately
# to avoid precision issues from float -> int
base = <int64_t>ts
@@ -287,13 +296,6 @@ cdef _TSObject convert_to_tsobject(object ts, tzinfo tz, str unit,
# GH#47266 Avoid cast_from_unit, which would give weird results
# e.g. with "Y" and 150.0 we'd get 2120-01-01 09:00:00
return convert_to_tsobject(int(ts), tz, unit, False, False)
- else:
- # GH#47267 it is clear that 2 "M" corresponds to 1970-02-01,
- # but not clear what 2.5 "M" corresponds to, so we will
- # disallow that case.
- raise ValueError(
- f"Conversion of non-round float with unit={unit} is ambiguous."
- )
ts = cast_from_unit(ts, unit)
obj.value = ts
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50852 | 2023-01-19T01:59:51Z | 2023-01-19T17:50:01Z | 2023-01-19T17:50:01Z | 2023-01-19T17:50:55Z |
REF: tighter typing in parsing.pyx | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 5f7fb05876b35..0575ac69ca452 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -273,7 +273,7 @@ def array_with_unit_to_datetime(
bint is_coerce = errors=="coerce"
bint is_raise = errors=="raise"
ndarray[int64_t] iresult
- object tz = None
+ tzinfo tz = None
float fval
assert is_ignore or is_coerce or is_raise
@@ -346,7 +346,7 @@ cdef _array_with_unit_to_datetime_object_fallback(ndarray[object] values, str un
cdef:
Py_ssize_t i, n = len(values)
ndarray[object] oresult
- object tz = None
+ tzinfo tz = None
# TODO: fix subtle differences between this and no-unit code
oresult = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd
index 3e3f206685d37..c0b0db1336d14 100644
--- a/pandas/_libs/tslibs/dtypes.pxd
+++ b/pandas/_libs/tslibs/dtypes.pxd
@@ -11,6 +11,7 @@ cpdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1
cpdef NPY_DATETIMEUNIT get_supported_reso(NPY_DATETIMEUNIT reso)
cdef dict attrname_to_abbrevs
+cdef dict npy_unit_to_attrname
cdef enum c_FreqGroup:
# Mirrors FreqGroup in the .pyx file
diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index 2df5349f45272..928f620b5e7c6 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -423,3 +423,15 @@ cdef dict _reso_str_map = {
}
cdef dict _str_reso_map = {v: k for k, v in _reso_str_map.items()}
+
+cdef dict npy_unit_to_attrname = {
+ NPY_DATETIMEUNIT.NPY_FR_Y: "year",
+ NPY_DATETIMEUNIT.NPY_FR_M: "month",
+ NPY_DATETIMEUNIT.NPY_FR_D: "day",
+ NPY_DATETIMEUNIT.NPY_FR_h: "hour",
+ NPY_DATETIMEUNIT.NPY_FR_m: "minute",
+ NPY_DATETIMEUNIT.NPY_FR_s: "second",
+ NPY_DATETIMEUNIT.NPY_FR_ms: "millisecond",
+ NPY_DATETIMEUNIT.NPY_FR_us: "microsecond",
+ NPY_DATETIMEUNIT.NPY_FR_ns: "nanosecond",
+}
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 98667436915f3..8a22f8d45dac9 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -12,7 +12,10 @@ from cpython.datetime cimport (
datetime,
datetime_new,
import_datetime,
+ timedelta,
+ tzinfo,
)
+from datetime import timezone
from cpython.object cimport PyObject_Str
from cython cimport Py_ssize_t
from libc.string cimport strchr
@@ -49,6 +52,7 @@ from dateutil.tz import (
from pandas._config import get_option
from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS
+from pandas._libs.tslibs.dtypes cimport npy_unit_to_attrname
from pandas._libs.tslibs.nattype cimport (
c_NaT as NaT,
c_nat_strings as nat_strings,
@@ -120,7 +124,9 @@ cdef int _parse_4digit(const char* s):
return result
-cdef object _parse_delimited_date(str date_string, bint dayfirst):
+cdef datetime _parse_delimited_date(
+ str date_string, bint dayfirst, NPY_DATETIMEUNIT* creso
+):
"""
Parse special cases of dates: MM/DD/YYYY, DD/MM/YYYY, MM/YYYY.
@@ -138,12 +144,12 @@ cdef object _parse_delimited_date(str date_string, bint dayfirst):
----------
date_string : str
dayfirst : bool
+ creso : NPY_DATETIMEUNIT*
+ For specifying identified resolution.
Returns:
--------
datetime or None
- str or None
- Describing resolution of the parsed string.
"""
cdef:
const char* buf
@@ -157,45 +163,45 @@ cdef object _parse_delimited_date(str date_string, bint dayfirst):
month = _parse_2digit(buf)
day = _parse_2digit(buf + 3)
year = _parse_4digit(buf + 6)
- reso = "day"
+ creso[0] = NPY_DATETIMEUNIT.NPY_FR_D
can_swap = 1
elif length == 9 and _is_delimiter(buf[1]) and _is_delimiter(buf[4]):
# parsing M?DD?YYYY and D?MM?YYYY dates
month = _parse_1digit(buf)
day = _parse_2digit(buf + 2)
year = _parse_4digit(buf + 5)
- reso = "day"
+ creso[0] = NPY_DATETIMEUNIT.NPY_FR_D
can_swap = 1
elif length == 9 and _is_delimiter(buf[2]) and _is_delimiter(buf[4]):
# parsing MM?D?YYYY and DD?M?YYYY dates
month = _parse_2digit(buf)
day = _parse_1digit(buf + 3)
year = _parse_4digit(buf + 5)
- reso = "day"
+ creso[0] = NPY_DATETIMEUNIT.NPY_FR_D
can_swap = 1
elif length == 8 and _is_delimiter(buf[1]) and _is_delimiter(buf[3]):
# parsing M?D?YYYY and D?M?YYYY dates
month = _parse_1digit(buf)
day = _parse_1digit(buf + 2)
year = _parse_4digit(buf + 4)
- reso = "day"
+ creso[0] = NPY_DATETIMEUNIT.NPY_FR_D
can_swap = 1
elif length == 7 and _is_delimiter(buf[2]):
# parsing MM?YYYY dates
if buf[2] == b".":
# we cannot reliably tell whether e.g. 10.2010 is a float
# or a date, thus we refuse to parse it here
- return None, None
+ return None
month = _parse_2digit(buf)
year = _parse_4digit(buf + 3)
- reso = "month"
+ creso[0] = NPY_DATETIMEUNIT.NPY_FR_M
else:
- return None, None
+ return None
if month < 0 or day < 0 or year < 1000:
# some part is not an integer, so
# date_string can't be converted to date, above format
- return None, None
+ return None
if 1 <= month <= MAX_DAYS_IN_MONTH and 1 <= day <= MAX_DAYS_IN_MONTH \
and (month <= MAX_MONTH or day <= MAX_MONTH):
@@ -203,7 +209,7 @@ cdef object _parse_delimited_date(str date_string, bint dayfirst):
day, month = month, day
# In Python <= 3.6.0 there is no range checking for invalid dates
# in C api, thus we call faster C version for 3.6.1 or newer
- return datetime_new(year, month, day, 0, 0, 0, 0, None), reso
+ return datetime_new(year, month, day, 0, 0, 0, 0, None)
raise DateParseError(f"Invalid date specified ({month}/{day})")
@@ -264,6 +270,7 @@ def parse_datetime_string(
cdef:
datetime dt
+ NPY_DATETIMEUNIT creso
if not _does_string_look_like_datetime(date_string):
raise ValueError(f'Given date string "{date_string}" not likely a datetime')
@@ -274,7 +281,7 @@ def parse_datetime_string(
yearfirst=yearfirst)
return dt
- dt, _ = _parse_delimited_date(date_string, dayfirst)
+ dt = _parse_delimited_date(date_string, dayfirst, &creso)
if dt is not None:
return dt
@@ -360,18 +367,19 @@ def parse_datetime_string_with_reso(
bint string_to_dts_failed
npy_datetimestruct dts
NPY_DATETIMEUNIT out_bestunit
- int out_local
+ int out_local = 0
int out_tzoffset
+ tzinfo tz
if not _does_string_look_like_datetime(date_string):
raise ValueError(f'Given date string "{date_string}" not likely a datetime')
- parsed, reso = _parse_delimited_date(date_string, dayfirst)
+ parsed = _parse_delimited_date(date_string, dayfirst, &out_bestunit)
if parsed is not None:
+ reso = npy_unit_to_attrname[out_bestunit]
return parsed, reso
# Try iso8601 first, as it handles nanoseconds
- # TODO: does this render some/all of parse_delimited_date redundant?
string_to_dts_failed = string_to_dts(
date_string, &dts, &out_bestunit, &out_local,
&out_tzoffset, False
@@ -381,31 +389,25 @@ def parse_datetime_string_with_reso(
NPY_DATETIMEUNIT.NPY_FR_ps,
NPY_DATETIMEUNIT.NPY_FR_fs,
NPY_DATETIMEUNIT.NPY_FR_as}
- if out_bestunit in timestamp_units or out_local:
- # TODO: the not-out_local case we could do without Timestamp;
- # avoid circular import
+ if out_bestunit in timestamp_units:
+ # TODO: avoid circular import
from pandas import Timestamp
parsed = Timestamp(date_string)
else:
- parsed = datetime(
- dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us
+ if out_local:
+ tz = timezone(timedelta(minutes=out_tzoffset))
+ else:
+ tz = None
+ parsed = datetime_new(
+ dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, tz
)
# Match Timestamp and drop picoseconds, femtoseconds, attoseconds
# The new resolution will just be nano
# GH 50417
if out_bestunit in timestamp_units:
out_bestunit = NPY_DATETIMEUNIT.NPY_FR_ns
- reso = {
- NPY_DATETIMEUNIT.NPY_FR_Y: "year",
- NPY_DATETIMEUNIT.NPY_FR_M: "month",
- NPY_DATETIMEUNIT.NPY_FR_D: "day",
- NPY_DATETIMEUNIT.NPY_FR_h: "hour",
- NPY_DATETIMEUNIT.NPY_FR_m: "minute",
- NPY_DATETIMEUNIT.NPY_FR_s: "second",
- NPY_DATETIMEUNIT.NPY_FR_ms: "millisecond",
- NPY_DATETIMEUNIT.NPY_FR_us: "microsecond",
- NPY_DATETIMEUNIT.NPY_FR_ns: "nanosecond",
- }[out_bestunit]
+
+ reso = npy_unit_to_attrname[out_bestunit]
return parsed, reso
try:
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50851 | 2023-01-19T01:57:16Z | 2023-01-20T18:25:46Z | 2023-01-20T18:25:46Z | 2023-01-20T18:27:32Z |
PERF: Improve performance for array equal fast | diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi
index 2439082bf7413..72b46d9e30684 100644
--- a/pandas/_libs/lib.pyi
+++ b/pandas/_libs/lib.pyi
@@ -240,6 +240,6 @@ def get_reverse_indexer(
) -> npt.NDArray[np.intp]: ...
def is_bool_list(obj: list) -> bool: ...
def dtypes_all_equal(types: list[DtypeObj]) -> bool: ...
-def array_equal_fast(
- left: np.ndarray, right: np.ndarray # np.ndarray[np.int64, ndim=1]
+def is_range_indexer(
+ left: np.ndarray, n: int # np.ndarray[np.int64, ndim=1]
) -> bool: ...
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 176307ef27cff..16d5bbaad9de9 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -650,22 +650,20 @@ ctypedef fused int6432_t:
@cython.wraparound(False)
@cython.boundscheck(False)
-def array_equal_fast(
- ndarray[int6432_t, ndim=1] left, ndarray[int6432_t, ndim=1] right,
-) -> bool:
+def is_range_indexer(ndarray[int6432_t, ndim=1] left, int n) -> bool:
"""
Perform an element by element comparison on 1-d integer arrays, meant for indexer
comparisons
"""
cdef:
- Py_ssize_t i, n = left.size
+ Py_ssize_t i
- if left.size != right.size:
+ if left.size != n:
return False
for i in range(n):
- if left[i] != right[i]:
+ if left[i] != i:
return False
return True
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index eb0eb34dbefc4..3b122eaa814e5 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -48,7 +48,7 @@
from pandas._libs.hashtable import duplicated
from pandas._libs.lib import (
NoDefault,
- array_equal_fast,
+ is_range_indexer,
no_default,
)
from pandas._typing import (
@@ -6724,7 +6724,7 @@ def sort_values(
else:
return self.copy(deep=None)
- if array_equal_fast(indexer, np.arange(0, len(indexer), dtype=indexer.dtype)):
+ if is_range_indexer(indexer, len(indexer)):
if inplace:
return self._update_inplace(self)
else:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ab9b76fbdf712..a91c46d7d06c4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -35,7 +35,7 @@
)
from pandas._libs import lib
-from pandas._libs.lib import array_equal_fast
+from pandas._libs.lib import is_range_indexer
from pandas._libs.tslibs import (
Period,
Tick,
@@ -3780,10 +3780,7 @@ def _take(
axis == 0
and indices.ndim == 1
and using_copy_on_write()
- and array_equal_fast(
- indices,
- np.arange(0, len(self), dtype=np.intp),
- )
+ and is_range_indexer(indices, len(self))
):
return self.copy(deep=None)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 91f7095e59db5..2849b009cf72c 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -33,7 +33,7 @@
reshape,
)
from pandas._libs.lib import (
- array_equal_fast,
+ is_range_indexer,
no_default,
)
from pandas._typing import (
@@ -891,7 +891,7 @@ def take(self, indices, axis: Axis = 0, **kwargs) -> Series:
if (
indices.ndim == 1
and using_copy_on_write()
- and array_equal_fast(indices, np.arange(0, len(self), dtype=indices.dtype))
+ and is_range_indexer(indices, len(self))
):
return self.copy(deep=None)
@@ -3566,9 +3566,7 @@ def sort_values(
values_to_sort = ensure_key_mapped(self, key)._values if key else self._values
sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position)
- if array_equal_fast(
- sorted_index, np.arange(0, len(sorted_index), dtype=sorted_index.dtype)
- ):
+ if is_range_indexer(sorted_index, len(sorted_index)):
if inplace:
return self._update_inplace(self)
return self.copy(deep=None)
diff --git a/pandas/tests/libs/test_lib.py b/pandas/tests/libs/test_lib.py
index e352250dc748d..302dc21ec997c 100644
--- a/pandas/tests/libs/test_lib.py
+++ b/pandas/tests/libs/test_lib.py
@@ -244,25 +244,22 @@ def test_get_reverse_indexer(self):
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["int64", "int32"])
- def test_array_equal_fast(self, dtype):
+ def test_is_range_indexer(self, dtype):
# GH#50592
- left = np.arange(1, 100, dtype=dtype)
- right = np.arange(1, 100, dtype=dtype)
- assert lib.array_equal_fast(left, right)
+ left = np.arange(0, 100, dtype=dtype)
+ assert lib.is_range_indexer(left, 100)
@pytest.mark.parametrize("dtype", ["int64", "int32"])
- def test_array_equal_fast_not_equal(self, dtype):
+ def test_is_range_indexer_not_equal(self, dtype):
# GH#50592
left = np.array([1, 2], dtype=dtype)
- right = np.array([2, 2], dtype=dtype)
- assert not lib.array_equal_fast(left, right)
+ assert not lib.is_range_indexer(left, 2)
@pytest.mark.parametrize("dtype", ["int64", "int32"])
- def test_array_equal_fast_not_equal_shape(self, dtype):
+ def test_is_range_indexer_not_equal_shape(self, dtype):
# GH#50592
- left = np.array([1, 2, 3], dtype=dtype)
- right = np.array([2, 2], dtype=dtype)
- assert not lib.array_equal_fast(left, right)
+ left = np.array([0, 1, 2], dtype=dtype)
+ assert not lib.is_range_indexer(left, 2)
def test_cache_readonly_preserve_docstrings():
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
gives us a 50% boost if everything is equal and reduces it to nanoseconds if the first non-equal element is within the first couple elements compared to 500 microseconds | https://api.github.com/repos/pandas-dev/pandas/pulls/50850 | 2023-01-18T23:18:25Z | 2023-01-20T17:03:03Z | 2023-01-20T17:03:02Z | 2023-01-20T17:03:06Z |
ENH: Add ea support to get_dummies | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index bbecf3fee01f3..ff21a68d31f92 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -158,6 +158,7 @@ Other enhancements
- Added ``name`` parameter to :meth:`IntervalIndex.from_breaks`, :meth:`IntervalIndex.from_arrays` and :meth:`IntervalIndex.from_tuples` (:issue:`48911`)
- Improve exception message when using :func:`assert_frame_equal` on a :class:`DataFrame` to include the column that is compared (:issue:`50323`)
- Improved error message for :func:`merge_asof` when join-columns were duplicated (:issue:`50102`)
+- Added support for extension array dtypes to :func:`get_dummies` (:func:`32430`)
- Added :meth:`Index.infer_objects` analogous to :meth:`Series.infer_objects` (:issue:`50034`)
- Added ``copy`` parameter to :meth:`Series.infer_objects` and :meth:`DataFrame.infer_objects`, passing ``False`` will avoid making copies for series or columns that are already non-object or where no better dtype can be inferred (:issue:`50096`)
- :meth:`DataFrame.plot.hist` now recognizes ``xlabel`` and ``ylabel`` arguments (:issue:`49793`)
diff --git a/pandas/core/reshape/encoding.py b/pandas/core/reshape/encoding.py
index 7e45e587ca84a..2aa1a3001fb6b 100644
--- a/pandas/core/reshape/encoding.py
+++ b/pandas/core/reshape/encoding.py
@@ -16,6 +16,7 @@
is_integer_dtype,
is_list_like,
is_object_dtype,
+ pandas_dtype,
)
from pandas.core.arrays import SparseArray
@@ -240,9 +241,9 @@ def _get_dummies_1d(
if dtype is None:
dtype = np.dtype(bool)
- dtype = np.dtype(dtype)
+ _dtype = pandas_dtype(dtype)
- if is_object_dtype(dtype):
+ if is_object_dtype(_dtype):
raise ValueError("dtype=object is not a valid dtype for get_dummies")
def get_empty_frame(data) -> DataFrame:
@@ -317,7 +318,12 @@ def get_empty_frame(data) -> DataFrame:
else:
# take on axis=1 + transpose to ensure ndarray layout is column-major
- dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=1).T
+ eye_dtype: NpDtype
+ if isinstance(_dtype, np.dtype):
+ eye_dtype = _dtype
+ else:
+ eye_dtype = np.bool_
+ dummy_mat = np.eye(number_of_cols, dtype=eye_dtype).take(codes, axis=1).T
if not dummy_na:
# reset NaN GH4446
@@ -327,7 +333,7 @@ def get_empty_frame(data) -> DataFrame:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
- return DataFrame(dummy_mat, index=index, columns=dummy_cols)
+ return DataFrame(dummy_mat, index=index, columns=dummy_cols, dtype=_dtype)
def from_dummies(
diff --git a/pandas/tests/reshape/test_get_dummies.py b/pandas/tests/reshape/test_get_dummies.py
index 8a7985280eff4..ed4da9562aeee 100644
--- a/pandas/tests/reshape/test_get_dummies.py
+++ b/pandas/tests/reshape/test_get_dummies.py
@@ -657,3 +657,23 @@ def test_get_dummies_with_string_values(self, values):
with pytest.raises(TypeError, match=msg):
get_dummies(df, columns=values)
+
+ def test_get_dummies_ea_dtype_series(self, any_numeric_ea_dtype):
+ # GH#32430
+ ser = Series(list("abca"))
+ result = get_dummies(ser, dtype=any_numeric_ea_dtype)
+ expected = DataFrame(
+ {"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], "c": [0, 0, 1, 0]},
+ dtype=any_numeric_ea_dtype,
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_get_dummies_ea_dtype_dataframe(self, any_numeric_ea_dtype):
+ # GH#32430
+ df = DataFrame({"x": list("abca")})
+ result = get_dummies(df, dtype=any_numeric_ea_dtype)
+ expected = DataFrame(
+ {"x_a": [1, 0, 0, 1], "x_b": [0, 1, 0, 0], "x_c": [0, 0, 1, 0]},
+ dtype=any_numeric_ea_dtype,
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #32430 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50849 | 2023-01-18T23:02:43Z | 2023-01-23T18:14:11Z | 2023-01-23T18:14:11Z | 2023-04-18T01:52:35Z |
Revert "BUG/PERF: Series.replace with dtype="category"" | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 614832c5acd1b..7054d93457264 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -844,7 +844,6 @@ Performance improvements
- Performance improvement in :func:`merge` and :meth:`DataFrame.join` when joining on a sorted :class:`MultiIndex` (:issue:`48504`)
- Performance improvement in :func:`to_datetime` when parsing strings with timezone offsets (:issue:`50107`)
- Performance improvement in :meth:`DataFrame.loc` and :meth:`Series.loc` for tuple-based indexing of a :class:`MultiIndex` (:issue:`48384`)
-- Performance improvement for :meth:`Series.replace` with categorical dtype (:issue:`49404`)
- Performance improvement for :meth:`MultiIndex.unique` (:issue:`48335`)
- Performance improvement for :func:`concat` with extension array backed indexes (:issue:`49128`, :issue:`49178`)
- Reduce memory usage of :meth:`DataFrame.to_pickle`/:meth:`Series.to_pickle` when using BZ2 or LZMA (:issue:`49068`)
@@ -887,8 +886,6 @@ Bug fixes
Categorical
^^^^^^^^^^^
- Bug in :meth:`Categorical.set_categories` losing dtype information (:issue:`48812`)
-- Bug in :meth:`Series.replace` with categorical dtype when ``to_replace`` values overlap with new values (:issue:`49404`)
-- Bug in :meth:`Series.replace` with categorical dtype losing nullable dtypes of underlying categories (:issue:`49404`)
- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` would reorder categories when used as a grouper (:issue:`48749`)
- Bug in :class:`Categorical` constructor when constructing from a :class:`Categorical` object and ``dtype="category"`` losing ordered-ness (:issue:`49309`)
-
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 64fdc7949f96b..14f334d72dbb1 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2273,24 +2273,42 @@ def isin(self, values) -> npt.NDArray[np.bool_]:
return algorithms.isin(self.codes, code_values)
def _replace(self, *, to_replace, value, inplace: bool = False):
- from pandas import (
- Index,
- Series,
- )
-
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
- ser = Series(cat.categories, copy=True)
- ser = ser.replace(to_replace=to_replace, value=value)
+ # other cases, like if both to_replace and value are list-like or if
+ # to_replace is a dict, are handled separately in NDFrame
+ if not is_list_like(to_replace):
+ to_replace = [to_replace]
+
+ categories = cat.categories.tolist()
+ removals = set()
+ for replace_value in to_replace:
+ if value == replace_value:
+ continue
+ if replace_value not in cat.categories:
+ continue
+ if isna(value):
+ removals.add(replace_value)
+ continue
+
+ index = categories.index(replace_value)
+
+ if value in cat.categories:
+ value_index = categories.index(value)
+ cat._codes[cat._codes == index] = value_index
+ removals.add(replace_value)
+ else:
+ categories[index] = value
+ cat._set_categories(categories)
- all_values = Index(ser)
- new_categories = Index(ser.dropna().drop_duplicates(keep="first"))
- new_codes = recode_for_categories(
- cat._codes, all_values, new_categories, copy=False
- )
- new_dtype = CategoricalDtype(new_categories, ordered=self.dtype.ordered)
- NDArrayBacked.__init__(cat, new_codes, new_dtype)
+ if len(removals):
+ new_categories = [c for c in categories if c not in removals]
+ new_dtype = CategoricalDtype(new_categories, ordered=self.dtype.ordered)
+ codes = recode_for_categories(
+ cat.codes, cat.categories, new_dtype.categories
+ )
+ NDArrayBacked.__init__(cat, codes, new_dtype)
if not inplace:
return cat
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 6056cada27069..4bb4882574228 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -536,10 +536,12 @@ def replace(
if isinstance(values, Categorical):
# TODO: avoid special-casing
- # GH49404
blk = self if inplace else self.copy()
- values = cast(Categorical, blk.values)
- values._replace(to_replace=to_replace, value=value, inplace=True)
+ # error: Item "ExtensionArray" of "Union[ndarray[Any, Any],
+ # ExtensionArray]" has no attribute "_replace"
+ blk.values._replace( # type: ignore[union-attr]
+ to_replace=to_replace, value=value, inplace=True
+ )
return [blk]
if not self._can_hold_element(to_replace):
@@ -649,14 +651,6 @@ def replace_list(
"""
values = self.values
- if isinstance(values, Categorical):
- # TODO: avoid special-casing
- # GH49404
- blk = self if inplace else self.copy()
- values = cast(Categorical, blk.values)
- values._replace(to_replace=src_list, value=dest_list, inplace=True)
- return [blk]
-
# Exclude anything that we know we won't contain
pairs = [
(x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x)
diff --git a/pandas/tests/arrays/categorical/test_replace.py b/pandas/tests/arrays/categorical/test_replace.py
index 62a7bf0673a16..a3ba420c84a17 100644
--- a/pandas/tests/arrays/categorical/test_replace.py
+++ b/pandas/tests/arrays/categorical/test_replace.py
@@ -21,8 +21,6 @@
((5, 6), 2, [1, 2, 3], False),
([1], [2], [2, 2, 3], False),
([1, 4], [5, 2], [5, 2, 3], False),
- # GH49404
- ([1, 2, 3], [2, 3, 4], [2, 3, 4], False),
# check_categorical sorts categories, which crashes on mixed dtypes
(3, "4", [1, 2, "4"], False),
([1, 2, "3"], "5", ["5", "5", 3], True),
@@ -67,11 +65,3 @@ def test_replace_categorical(to_replace, value, result, expected_error_msg):
pd.Series(cat).replace(to_replace, value, inplace=True)
tm.assert_categorical_equal(cat, expected)
-
-
-def test_replace_categorical_ea_dtype():
- # GH49404
- cat = Categorical(pd.array(["a", "b"], dtype="string"))
- result = pd.Series(cat).replace(["a", "b"], ["c", pd.NA])._values
- expected = Categorical(pd.array(["c", pd.NA], dtype="string"))
- tm.assert_categorical_equal(result, expected)
| Reverts pandas-dev/pandas#49404 | https://api.github.com/repos/pandas-dev/pandas/pulls/50848 | 2023-01-18T22:36:40Z | 2023-01-18T23:24:04Z | 2023-01-18T23:24:04Z | 2023-01-19T00:10:08Z |
BUG: read_csv overflowing for ea int with nulls | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7054d93457264..b95f799750367 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1023,6 +1023,7 @@ I/O
- Bug in :meth:`DataFrame.to_string` ignoring float formatter for extension arrays (:issue:`39336`)
- Fixed memory leak which stemmed from the initialization of the internal JSON module (:issue:`49222`)
- Fixed issue where :func:`json_normalize` would incorrectly remove leading characters from column names that matched the ``sep`` argument (:issue:`49861`)
+- Bug in :func:`read_csv` unnecessarily overflowing for extension array dtype when containing ``NA`` (:issue:`32134`)
- Bug in :meth:`DataFrame.to_dict` not converting ``NA`` to ``None`` (:issue:`50795`)
- Bug in :meth:`DataFrame.to_json` where it would segfault when failing to encode a string (:issue:`50307`)
- Bug in :func:`read_xml` where file-like objects failed when iterparse is used (:issue:`50641`)
diff --git a/pandas/core/arrays/numeric.py b/pandas/core/arrays/numeric.py
index 6ae2c3a2e2749..2d9a3ae63259d 100644
--- a/pandas/core/arrays/numeric.py
+++ b/pandas/core/arrays/numeric.py
@@ -285,7 +285,7 @@ def _from_sequence_of_strings(
) -> T:
from pandas.core.tools.numeric import to_numeric
- scalars = to_numeric(strings, errors="raise")
+ scalars = to_numeric(strings, errors="raise", use_nullable_dtypes=True)
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
index 8fd08122f0834..52b142d81cd5e 100644
--- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
+++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
@@ -18,6 +18,7 @@
import pandas._testing as tm
from pandas.core.arrays import (
ArrowStringArray,
+ IntegerArray,
StringArray,
)
@@ -527,3 +528,23 @@ def test_use_nullable_dtypes_pyarrow_backend(all_parsers, request):
}
)
tm.assert_frame_equal(result, expected)
+
+
+def test_ea_int_avoid_overflow(all_parsers):
+ # GH#32134
+ parser = all_parsers
+ data = """a,b
+1,1
+,1
+1582218195625938945,1
+"""
+ result = parser.read_csv(StringIO(data), dtype={"a": "Int64"})
+ expected = DataFrame(
+ {
+ "a": IntegerArray(
+ np.array([1, 1, 1582218195625938945]), np.array([False, True, False])
+ ),
+ "b": 1,
+ }
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #32134 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50847 | 2023-01-18T22:33:00Z | 2023-01-19T18:04:50Z | 2023-01-19T18:04:50Z | 2023-01-19T20:21:26Z |
BUG: groupby.describe on a frame with duplicate column names | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index dacf333e0fcc0..07524fbf49652 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -1263,6 +1263,7 @@ Groupby/resample/rolling
- Bug in :meth:`.SeriesGroupBy.value_counts` did not respect ``sort=False`` (:issue:`50482`)
- Bug in :meth:`.DataFrameGroupBy.resample` raises ``KeyError`` when getting the result from a key list when resampling on time index (:issue:`50840`)
- Bug in :meth:`.DataFrameGroupBy.transform` and :meth:`.SeriesGroupBy.transform` would raise incorrectly when grouper had ``axis=1`` for ``"ngroup"`` argument (:issue:`45986`)
+- Bug in :meth:`.DataFrameGroupBy.describe` produced incorrect results when data had duplicate columns (:issue:`50806`)
-
Reshaping
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 1ef97fed7ba05..1fcbc7c305a06 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1256,6 +1256,27 @@ def test_describe_with_duplicate_output_column_names(as_index, keys):
tm.assert_frame_equal(result, expected)
+def test_describe_duplicate_columns():
+ # GH#50806
+ df = DataFrame([[0, 1, 2, 3]])
+ df.columns = [0, 1, 2, 0]
+ gb = df.groupby(df[1])
+ result = gb.describe(percentiles=[])
+
+ columns = ["count", "mean", "std", "min", "50%", "max"]
+ frames = [
+ DataFrame([[1.0, val, np.nan, val, val, val]], index=[1], columns=columns)
+ for val in (0.0, 2.0, 3.0)
+ ]
+ expected = pd.concat(frames, axis=1)
+ expected.columns = MultiIndex(
+ levels=[[0, 2], columns],
+ codes=[6 * [0] + 6 * [1] + 6 * [0], 3 * list(range(6))],
+ )
+ expected.index.names = [1]
+ tm.assert_frame_equal(result, expected)
+
+
def test_groupby_mean_no_overflow():
# Regression test for (#22487)
df = DataFrame(
@@ -1596,3 +1617,22 @@ def test_multiindex_group_all_columns_when_empty(groupby_func):
result = method(*args).index
expected = df.index
tm.assert_index_equal(result, expected)
+
+
+def test_duplicate_columns(request, groupby_func, as_index):
+ # GH#50806
+ if groupby_func == "corrwith":
+ msg = "GH#50845 - corrwith fails when there are duplicate columns"
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
+ df = DataFrame([[1, 3, 6], [1, 4, 7], [2, 5, 8]], columns=list("abb"))
+ args = get_groupby_method_args(groupby_func, df)
+ gb = df.groupby("a", as_index=as_index)
+ result = getattr(gb, groupby_func)(*args)
+
+ expected_df = df.set_axis(["a", "b", "c"], axis=1)
+ expected_args = get_groupby_method_args(groupby_func, expected_df)
+ expected_gb = expected_df.groupby("a", as_index=as_index)
+ expected = getattr(expected_gb, groupby_func)(*expected_args)
+ if groupby_func not in ("size", "ngroup", "cumcount"):
+ expected = expected.rename(columns={"c": "b"})
+ tm.assert_equal(result, expected)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 4861b7c90d1bb..d7b015fa7104a 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2828,3 +2828,13 @@ def test_groupby_reduce_period():
expected = ser[:10]
expected.index = Index(range(10), dtype=np.int_)
tm.assert_series_equal(res, expected)
+
+
+def test_obj_with_exclusions_duplicate_columns():
+ # GH#50806
+ df = DataFrame([[0, 1, 2, 3]])
+ df.columns = [0, 1, 2, 0]
+ gb = df.groupby(df[1])
+ result = gb._obj_with_exclusions
+ expected = df.take([0, 2, 3], axis=1)
+ tm.assert_frame_equal(result, expected)
| - [x] closes #50806 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
ASVs are below.
```
before after ratio
[a063af0e] [185e4f8e]
<groupby_select_obj_dup_cols~2> <groupby_select_obj_dup_cols>
- 769±10μs 694±6μs 0.90 groupby.GroupByMethods.time_dtype_as_group('uint', 'first', 'transformation', 5)
- 758±3μs 683±9μs 0.90 groupby.GroupByMethods.time_dtype_as_group('int16', 'last', 'transformation', 5)
- 11.9±0.04ms 10.7±0.2ms 0.90 groupby.AggEngine.time_dataframe_cython(True)
- 846±4μs 759±6μs 0.90 groupby.GroupByMethods.time_dtype_as_group('float', 'min', 'transformation', 5)
- 857±5μs 769±3μs 0.90 groupby.GroupByMethods.time_dtype_as_group('float', 'max', 'transformation', 5)
- 758±3μs 678±2μs 0.89 groupby.GroupByMethods.time_dtype_as_group('datetime', 'last', 'transformation', 5)
- 763±3μs 682±8μs 0.89 groupby.GroupByMethods.time_dtype_as_group('datetime', 'first', 'transformation', 5)
- 766±1μs 683±4μs 0.89 groupby.GroupByMethods.time_dtype_as_group('datetime', 'max', 'transformation', 5)
- 775±2μs 691±7μs 0.89 groupby.GroupByMethods.time_dtype_as_group('int16', 'min', 'transformation', 5)
- 860±8μs 766±7μs 0.89 groupby.GroupByMethods.time_dtype_as_group('float', 'sum', 'transformation', 5)
- 775±3μs 689±8μs 0.89 groupby.GroupByMethods.time_dtype_as_group('int16', 'first', 'transformation', 5)
- 847±4μs 752±9μs 0.89 groupby.GroupByMethods.time_dtype_as_group('float', 'prod', 'transformation', 5)
- 759±2μs 674±2μs 0.89 groupby.GroupByMethods.time_dtype_as_group('uint', 'prod', 'transformation', 5)
- 709±3μs 627±10μs 0.88 groupby.GroupByMethods.time_dtype_as_group('object', 'last', 'transformation', 5)
- 765±2μs 676±4μs 0.88 groupby.GroupByMethods.time_dtype_as_group('int16', 'prod', 'transformation', 5)
- 854±10μs 754±10μs 0.88 groupby.GroupByMethods.time_dtype_as_group('float', 'first', 'transformation', 5)
- 766±4μs 675±20μs 0.88 groupby.GroupByMethods.time_dtype_as_group('uint', 'max', 'transformation', 5)
- 775±3μs 683±6μs 0.88 groupby.GroupByMethods.time_dtype_as_group('datetime', 'min', 'transformation', 5)
- 670±1μs 590±9μs 0.88 groupby.GroupByMethods.time_dtype_as_group('uint', 'cumcount', 'transformation', 5)
- 767±4μs 674±20μs 0.88 groupby.GroupByMethods.time_dtype_as_group('int16', 'sum', 'transformation', 5)
- 777±2μs 683±10μs 0.88 groupby.GroupByMethods.time_dtype_as_group('int', 'first', 'transformation', 5)
- 858±7μs 753±7μs 0.88 groupby.GroupByMethods.time_dtype_as_group('float', 'last', 'transformation', 5)
- 772±5μs 678±1μs 0.88 groupby.GroupByMethods.time_dtype_as_group('int', 'last', 'transformation', 5)
- 661±2μs 579±1μs 0.88 groupby.GroupByMethods.time_dtype_as_group('int16', 'cumcount', 'transformation', 5)
- 588±2μs 513±1μs 0.87 groupby.GroupByMethods.time_dtype_as_group('object', 'cumcount', 'transformation', 5)
- 779±3μs 680±9μs 0.87 groupby.GroupByMethods.time_dtype_as_group('int16', 'max', 'transformation', 5)
- 781±3μs 677±20μs 0.87 groupby.GroupByMethods.time_dtype_as_group('int', 'max', 'transformation', 5)
- 772±10μs 669±9μs 0.87 groupby.GroupByMethods.time_dtype_as_group('int', 'sum', 'transformation', 5)
- 663±3μs 574±9μs 0.87 groupby.GroupByMethods.time_dtype_as_group('int', 'cumcount', 'transformation', 5)
- 769±7μs 665±20μs 0.86 groupby.GroupByMethods.time_dtype_as_group('uint', 'sum', 'transformation', 5)
- 770±7μs 666±20μs 0.86 groupby.GroupByMethods.time_dtype_as_group('int', 'prod', 'transformation', 5)
- 645±2μs 557±1μs 0.86 groupby.GroupByMethods.time_dtype_as_group('datetime', 'cumcount', 'transformation', 5)
- 729±4μs 629±10μs 0.86 groupby.GroupByMethods.time_dtype_as_group('float', 'cumcount', 'transformation', 5)
- 779±2μs 673±20μs 0.86 groupby.GroupByMethods.time_dtype_as_group('uint', 'min', 'transformation', 5)
- 717±3μs 618±20μs 0.86 groupby.GroupByMethods.time_dtype_as_group('object', 'first', 'transformation', 5)
- 781±2μs 665±20μs 0.85 groupby.GroupByMethods.time_dtype_as_group('int', 'min', 'transformation', 5)
- 782±20μs 665±20μs 0.85 groupby.GroupByMethods.time_dtype_as_group('uint', 'last', 'transformation', 5)
- 580±6μs 491±7μs 0.85 groupby.TransformNaN.time_first
- 448±2μs 353±10μs 0.79 groupby.SumBools.time_groupby_sum_booleans
- 374±1μs 292±1μs 0.78 groupby.GroupByMethods.time_dtype_as_group('int', 'cumcount', 'direct', 5)
- 372±3μs 284±2μs 0.76 groupby.GroupByMethods.time_dtype_as_group('datetime', 'cumcount', 'direct', 5)
- 358±3μs 272±1μs 0.76 groupby.GroupByMethods.time_dtype_as_group('object', 'cumcount', 'direct', 5)
- 376±7μs 283±0.7μs 0.75 groupby.GroupByMethods.time_dtype_as_group('uint', 'cumcount', 'direct', 5)
- 374±2μs 279±3μs 0.75 groupby.GroupByMethods.time_dtype_as_group('float', 'cumcount', 'direct', 5)
- 382±7μs 283±7μs 0.74 groupby.GroupByMethods.time_dtype_as_group('int16', 'cumcount', 'direct', 5)
- 181±2μs 85.9±1μs 0.47 groupby.GroupByMethods.time_dtype_as_group('float', 'sum', 'direct', 5)
- 175±3μs 82.9±0.3μs 0.47 groupby.GroupByMethods.time_dtype_as_group('int', 'sum', 'direct', 5)
- 175±2μs 82.9±0.2μs 0.47 groupby.GroupByMethods.time_dtype_as_group('int16', 'first', 'direct', 5)
- 178±0.2μs 84.1±0.9μs 0.47 groupby.GroupByMethods.time_dtype_as_group('int', 'first', 'direct', 5)
- 180±3μs 83.8±2μs 0.47 groupby.GroupByMethods.time_dtype_as_group('float', 'max', 'direct', 5)
- 177±0.9μs 82.3±0.2μs 0.47 groupby.GroupByMethods.time_dtype_as_group('uint', 'min', 'direct', 5)
- 175±1μs 81.4±1μs 0.47 groupby.GroupByMethods.time_dtype_as_group('int16', 'sum', 'direct', 5)
- 179±1μs 83.2±0.9μs 0.46 groupby.GroupByMethods.time_dtype_as_group('int16', 'max', 'direct', 5)
- 179±3μs 82.7±1μs 0.46 groupby.GroupByMethods.time_dtype_as_group('uint', 'max', 'direct', 5)
- 181±2μs 83.6±0.6μs 0.46 groupby.GroupByMethods.time_dtype_as_group('float', 'min', 'direct', 5)
- 180±1μs 82.9±0.6μs 0.46 groupby.GroupByMethods.time_dtype_as_group('uint', 'first', 'direct', 5)
- 178±1μs 81.9±0.3μs 0.46 groupby.GroupByMethods.time_dtype_as_group('int', 'min', 'direct', 5)
- 178±1μs 81.4±0.7μs 0.46 groupby.GroupByMethods.time_dtype_as_group('float', 'last', 'direct', 5)
- 177±1μs 81.3±0.5μs 0.46 groupby.GroupByMethods.time_dtype_as_group('uint', 'sum', 'direct', 5)
- 180±2μs 82.4±0.1μs 0.46 groupby.GroupByMethods.time_dtype_as_group('int', 'max', 'direct', 5)
- 176±1μs 80.6±2μs 0.46 groupby.GroupByMethods.time_dtype_as_group('datetime', 'max', 'direct', 5)
- 170±0.4μs 77.5±1μs 0.46 groupby.GroupByMethods.time_dtype_as_group('datetime', 'last', 'direct', 5)
- 175±3μs 79.7±0.2μs 0.46 groupby.GroupByMethods.time_dtype_as_group('float', 'prod', 'direct', 5)
- 179±1μs 81.5±0.3μs 0.46 groupby.GroupByMethods.time_dtype_as_group('int16', 'min', 'direct', 5)
- 179±4μs 81.5±2μs 0.45 groupby.GroupByMethods.time_dtype_as_group('float', 'first', 'direct', 5)
- 174±0.5μs 79.0±3μs 0.45 groupby.GroupByMethods.time_dtype_as_group('datetime', 'first', 'direct', 5)
- 175±1μs 79.4±2μs 0.45 groupby.GroupByMethods.time_dtype_as_group('datetime', 'min', 'direct', 5)
- 172±2μs 77.2±1μs 0.45 groupby.GroupByMethods.time_dtype_as_group('int', 'last', 'direct', 5)
- 170±2μs 75.6±0.3μs 0.44 groupby.GroupByMethods.time_dtype_as_group('uint', 'prod', 'direct', 5)
- 170±0.9μs 75.5±0.3μs 0.44 groupby.GroupByMethods.time_dtype_as_group('int', 'prod', 'direct', 5)
- 170±2μs 75.5±0.7μs 0.44 groupby.GroupByMethods.time_dtype_as_group('int16', 'prod', 'direct', 5)
- 172±4μs 76.2±0.2μs 0.44 groupby.GroupByMethods.time_dtype_as_group('int16', 'last', 'direct', 5)
- 175±0.6μs 75.7±2μs 0.43 groupby.GroupByMethods.time_dtype_as_group('uint', 'last', 'direct', 5)
- 158±1μs 66.0±2μs 0.42 groupby.GroupByMethods.time_dtype_as_group('object', 'first', 'direct', 5)
- 160±2μs 65.5±1μs 0.41 groupby.GroupByMethods.time_dtype_as_group('object', 'last', 'direct', 5)
SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.
PERFORMANCE INCREASED.
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/50846 | 2023-01-18T22:30:27Z | 2023-02-03T18:12:48Z | 2023-02-03T18:12:48Z | 2023-04-02T14:21:55Z |
ENH: Add ignore_index to Series.drop_duplicates | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 7054d93457264..b5b466edaccbe 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -161,6 +161,7 @@ Other enhancements
- Added :meth:`Index.infer_objects` analogous to :meth:`Series.infer_objects` (:issue:`50034`)
- Added ``copy`` parameter to :meth:`Series.infer_objects` and :meth:`DataFrame.infer_objects`, passing ``False`` will avoid making copies for series or columns that are already non-object or where no better dtype can be inferred (:issue:`50096`)
- :meth:`DataFrame.plot.hist` now recognizes ``xlabel`` and ``ylabel`` arguments (:issue:`49793`)
+- :meth:`Series.drop_duplicates` has gained ``ignore_index`` keyword to reset index (:issue:`48304`)
- Improved error message in :func:`to_datetime` for non-ISO8601 formats, informing users about the position of the first error (:issue:`50361`)
- Improved error message when trying to align :class:`DataFrame` objects (for example, in :func:`DataFrame.compare`) to clarify that "identically labelled" refers to both index and columns (:issue:`50083`)
- Added :meth:`DatetimeIndex.as_unit` and :meth:`TimedeltaIndex.as_unit` to convert to different resolutions; supported resolutions are "s", "ms", "us", and "ns" (:issue:`50616`)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 91f7095e59db5..465cc3ce41fad 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2131,22 +2131,32 @@ def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation
@overload
def drop_duplicates(
- self, *, keep: DropKeep = ..., inplace: Literal[False] = ...
+ self,
+ *,
+ keep: DropKeep = ...,
+ inplace: Literal[False] = ...,
+ ignore_index: bool = ...,
) -> Series:
...
@overload
- def drop_duplicates(self, *, keep: DropKeep = ..., inplace: Literal[True]) -> None:
+ def drop_duplicates(
+ self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ...
+ ) -> None:
...
@overload
def drop_duplicates(
- self, *, keep: DropKeep = ..., inplace: bool = ...
+ self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ...
) -> Series | None:
...
def drop_duplicates(
- self, *, keep: DropKeep = "first", inplace: bool = False
+ self,
+ *,
+ keep: DropKeep = "first",
+ inplace: bool = False,
+ ignore_index: bool = False,
) -> Series | None:
"""
Return Series with duplicate values removed.
@@ -2163,6 +2173,11 @@ def drop_duplicates(
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
+ ignore_index : bool, default ``False``
+ If ``True``, the resulting axis will be labeled 0, 1, …, n - 1.
+
+ .. versionadded:: 2.0.0
+
Returns
-------
Series or None
@@ -2225,6 +2240,10 @@ def drop_duplicates(
"""
inplace = validate_bool_kwarg(inplace, "inplace")
result = super().drop_duplicates(keep=keep)
+
+ if ignore_index:
+ result.index = default_index(len(result))
+
if inplace:
self._update_inplace(result)
return None
diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py
index 698430095b453..7e4503be2ec47 100644
--- a/pandas/tests/series/methods/test_drop_duplicates.py
+++ b/pandas/tests/series/methods/test_drop_duplicates.py
@@ -242,3 +242,10 @@ def test_drop_duplicates_categorical_bool_na(self, nulls_fixture):
index=[0, 1, 4],
)
tm.assert_series_equal(result, expected)
+
+ def test_drop_duplicates_ignore_index(self):
+ # GH#48304
+ ser = Series([1, 2, 2, 3])
+ result = ser.drop_duplicates(ignore_index=True)
+ expected = Series([1, 2, 3])
+ tm.assert_series_equal(result, expected)
| - [x] closes #48304 (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50844 | 2023-01-18T21:47:34Z | 2023-01-19T18:20:25Z | 2023-01-19T18:20:25Z | 2023-01-19T20:21:42Z |
adding test to check if rows are skipped when skip_blank_lines is set… | diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py
index 6656face3be84..86d3557d89c1f 100644
--- a/pandas/tests/io/parser/common/test_common_basic.py
+++ b/pandas/tests/io/parser/common/test_common_basic.py
@@ -862,3 +862,27 @@ def test_read_seek(all_parsers):
actual = parser.read_csv(file)
expected = parser.read_csv(StringIO(content))
tm.assert_frame_equal(actual, expected)
+
+
+@pytest.mark.xfail
+def test_read_csv_skip_blank_rows(all_parsers):
+ # GH22693
+ parser = all_parsers
+ csv_f = StringIO(
+ """A, B, C, D
+FOO, 1, 2, 3
+FOO, 4, 5, 6
+,,,
+FOO, 7, 8, 9
+, 10, 11, 12
+,,,
+"""
+ )
+
+ result = parser.read_csv(csv_f, skip_blank_lines=True)
+ expected = DataFrame(
+ [["FOO", 1, 2, 3], ["FOO", 4, 5, 6], ["FOO", 7, 8, 9], [np.nan, 10, 11, 12]],
+ columns=["A", "B", "C", "D"],
+ )
+
+ tm.assert_frame_equal(result, expected)
| … to true in read_csv
xref #22693
test added with xfail decorator
@noatamir @phofl | https://api.github.com/repos/pandas-dev/pandas/pulls/50843 | 2023-01-18T20:58:37Z | 2023-03-16T02:45:52Z | null | 2023-03-16T02:45:53Z |
Fix groupby-resample KeyError when resampling on Index and giving explicit list of columns. (1.5.x) | diff --git a/doc/source/whatsnew/v1.5.3.rst b/doc/source/whatsnew/v1.5.3.rst
index dae1e9b84cd67..879bad8ccbfb6 100644
--- a/doc/source/whatsnew/v1.5.3.rst
+++ b/doc/source/whatsnew/v1.5.3.rst
@@ -34,6 +34,7 @@ Bug fixes
- Fixed bug when instantiating a :class:`DataFrame` subclass inheriting from ``typing.Generic`` that triggered a ``UserWarning`` on python 3.11 (:issue:`49649`)
- Bug in :func:`pivot_table` with NumPy 1.24 or greater when the :class:`DataFrame` columns has nested elements (:issue:`50342`)
- Bug in :func:`pandas.testing.assert_series_equal` (and equivalent ``assert_`` functions) when having nested data and using numpy >= 1.25 (:issue:`50360`)
+- Bug in :meth:`DataFrameGroupBy.resample` raises ``KeyError`` when getting the result from a key list when resampling on time index (:issue:`50840`)
.. ---------------------------------------------------------------------------
.. _whatsnew_153.other:
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 0ac43b773bbf9..77fa574a2bd94 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1238,7 +1238,7 @@ def _gotitem(self, key, ndim, subset=None):
# Try to select from a DataFrame, falling back to a Series
try:
- if isinstance(key, list) and self.key not in key:
+ if isinstance(key, list) and self.key not in key and self.key is not None:
key.append(self.key)
groupby = self._groupby[key]
except IndexError:
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 8aff217cca5c1..579e3ac110b13 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -497,3 +497,33 @@ def test_groupby_resample_with_list_of_keys():
),
)
tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_resample_on_index_with_list_of_keys():
+ # GH 47362
+ df = DataFrame(
+ data={
+ "group": [0, 0, 0, 0, 1, 1, 1, 1],
+ "val": [3, 1, 4, 1, 5, 9, 2, 6],
+ },
+ index=Series(
+ date_range(start="2016-01-01", periods=8),
+ name="date",
+ ),
+ )
+ result = df.groupby("group").resample("2D")[["val"]].mean()
+ expected = DataFrame(
+ data={
+ "val": [2.0, 2.5, 7.0, 4.0],
+ },
+ index=Index(
+ data=[
+ (0, Timestamp("2016-01-01")),
+ (0, Timestamp("2016-01-03")),
+ (1, Timestamp("2016-01-05")),
+ (1, Timestamp("2016-01-07")),
+ ],
+ name=("group", "date"),
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #50840
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50841 | 2023-01-18T20:24:36Z | 2023-01-19T22:34:51Z | null | 2023-01-27T14:35:29Z |
PERF: Avoid re-computing mask in nanmedian | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 1a071ab978de9..9366404445fb2 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -870,6 +870,7 @@ Performance improvements
- Performance improvement in :func:`read_html` when there are multiple tables (:issue:`49929`)
- Performance improvement in :func:`to_datetime` when using ``'%Y%m%d'`` format (:issue:`17410`)
- Performance improvement in :func:`to_datetime` when format is given or can be inferred (:issue:`50465`)
+- Performance improvement in :meth:`Series.median` for nullable dtypes (:issue:`50838`)
- Performance improvement in :func:`read_csv` when passing :func:`to_datetime` lambda-function to ``date_parser`` and inputs have mixed timezone offsetes (:issue:`35296`)
- Performance improvement in :meth:`.SeriesGroupBy.value_counts` with categorical dtype (:issue:`46202`)
- Fixed a reference leak in :func:`read_hdf` (:issue:`37441`)
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 02372356d3fe4..61f1bcdef9568 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -746,16 +746,19 @@ def nanmedian(values, *, axis: AxisInt | None = None, skipna: bool = True, mask=
2.0
"""
- def get_median(x):
- mask = notna(x)
- if not skipna and not mask.all():
+ def get_median(x, _mask=None):
+ if _mask is None:
+ _mask = notna(x)
+ else:
+ _mask = ~_mask
+ if not skipna and not _mask.all():
return np.nan
with warnings.catch_warnings():
# Suppress RuntimeWarning about All-NaN slice
warnings.filterwarnings(
"ignore", "All-NaN slice encountered", RuntimeWarning
)
- res = np.nanmedian(x[mask])
+ res = np.nanmedian(x[_mask])
return res
values, mask, dtype, _, _ = _get_values(values, skipna, mask=mask)
@@ -796,7 +799,7 @@ def get_median(x):
else:
# otherwise return a scalar value
- res = get_median(values) if notempty else np.nan
+ res = get_median(values, mask) if notempty else np.nan
return _wrap_results(res, dtype)
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50838 | 2023-01-18T18:57:20Z | 2023-01-19T18:37:14Z | 2023-01-19T18:37:14Z | 2023-01-19T20:21:58Z |
WEB/CI: Use GITHUB_TOKEN when making Github API calls | diff --git a/web/pandas_web.py b/web/pandas_web.py
index e4568136edece..8c508a15f9a2b 100755
--- a/web/pandas_web.py
+++ b/web/pandas_web.py
@@ -43,6 +43,12 @@
import requests
import yaml
+api_token = os.environ.get("GITHUB_TOKEN")
+if api_token is not None:
+ GITHUB_API_HEADERS = {"Authorization": f"Bearer {api_token}"}
+else:
+ GITHUB_API_HEADERS = {}
+
class Preprocessors:
"""
@@ -168,7 +174,9 @@ def maintainers_add_info(context):
for user in (
context["maintainers"]["active"] + context["maintainers"]["inactive"]
):
- resp = requests.get(f"https://api.github.com/users/{user}")
+ resp = requests.get(
+ f"https://api.github.com/users/{user}", headers=GITHUB_API_HEADERS
+ )
if resp.status_code == 403:
sys.stderr.write(
"WARN: GitHub API quota exceeded when fetching maintainers\n"
@@ -199,7 +207,10 @@ def home_add_releases(context):
context["releases"] = []
github_repo_url = context["main"]["github_repo_url"]
- resp = requests.get(f"https://api.github.com/repos/{github_repo_url}/releases")
+ resp = requests.get(
+ f"https://api.github.com/repos/{github_repo_url}/releases",
+ headers=GITHUB_API_HEADERS,
+ )
if resp.status_code == 403:
sys.stderr.write("WARN: GitHub API quota exceeded when fetching releases\n")
resp_bkp = requests.get(context["main"]["production_url"] + "releases.json")
@@ -275,7 +286,8 @@ def roadmap_pdeps(context):
github_repo_url = context["main"]["github_repo_url"]
resp = requests.get(
"https://api.github.com/search/issues?"
- f"q=is:pr is:open label:PDEP repo:{github_repo_url}"
+ f"q=is:pr is:open label:PDEP repo:{github_repo_url}",
+ headers=GITHUB_API_HEADERS,
)
if resp.status_code == 403:
sys.stderr.write("WARN: GitHub API quota exceeded when fetching pdeps\n")
| Follow up to https://github.com/pandas-dev/pandas/pull/50811, might as well still make authenticated API calls to Github | https://api.github.com/repos/pandas-dev/pandas/pulls/50837 | 2023-01-18T18:47:23Z | 2023-01-19T02:20:50Z | 2023-01-19T02:20:50Z | 2023-01-20T03:25:11Z |
BUG: merge_asof with non-nano | diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index ac4e8934570ce..1aedc3a31e3e7 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -316,7 +316,7 @@ def _unbox_scalar(self, value) -> np.timedelta64:
raise ValueError("'value' should be a Timedelta.")
self._check_compatible_with(value)
if value is NaT:
- return np.timedelta64(value.value, "ns")
+ return np.timedelta64(value.value, self.unit)
else:
return value.as_unit(self.unit).asm8
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 8d009d25a66ba..7d8d7a37ff7e7 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -89,7 +89,10 @@
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
import pandas.core.common as com
-from pandas.core.construction import extract_array
+from pandas.core.construction import (
+ ensure_wrapped_if_datetimelike,
+ extract_array,
+)
from pandas.core.frame import _merge_doc
from pandas.core.indexes.api import default_index
from pandas.core.sorting import is_int64_overflow_possible
@@ -2109,12 +2112,24 @@ def injection(obj):
# initial type conversion as needed
if needs_i8_conversion(left_values):
- left_values = left_values.view("i8")
- right_values = right_values.view("i8")
if tolerance is not None:
tolerance = Timedelta(tolerance)
+
+ # TODO: we have no test cases with PeriodDtype here; probably
+ # need to adjust tolerance for that case.
+ if left_values.dtype.kind in ["m", "M"]:
+ # Make sure the i8 representation for tolerance
+ # matches that for left_values/right_values.
+ lvs = ensure_wrapped_if_datetimelike(left_values)
+ tolerance = tolerance.as_unit(lvs.unit)
+
tolerance = tolerance.value
+ # TODO: require left_values.dtype == right_values.dtype, or at least
+ # comparable for e.g. dt64tz
+ left_values = left_values.view("i8")
+ right_values = right_values.view("i8")
+
# a "by" parameter requires special handling
if self.left_by is not None:
# remove 'on' parameter from values if one existed
diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py
index 4123f686163d4..3b522eaa075f0 100644
--- a/pandas/tests/reshape/merge/test_merge_asof.py
+++ b/pandas/tests/reshape/merge/test_merge_asof.py
@@ -16,6 +16,14 @@
from pandas.core.reshape.merge import MergeError
+@pytest.fixture(params=["s", "ms", "us", "ns"])
+def unit(request):
+ """
+ Resolution for datetimelike dtypes.
+ """
+ return request.param
+
+
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
@@ -63,8 +71,13 @@ def test_examples1(self):
result = merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
- def test_examples2(self):
+ def test_examples2(self, unit):
"""doc-string examples"""
+ if unit == "s":
+ pytest.skip(
+ "This test is invalid for unit='s' because that would "
+ "round the trades['time']]"
+ )
trades = pd.DataFrame(
{
"time": to_datetime(
@@ -75,7 +88,7 @@ def test_examples2(self):
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
- ),
+ ).astype(f"M8[{unit}]"),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
@@ -96,7 +109,7 @@ def test_examples2(self):
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
- ),
+ ).astype(f"M8[{unit}]"),
"ticker": [
"GOOG",
"MSFT",
@@ -127,7 +140,7 @@ def test_examples2(self):
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
- ),
+ ).astype(f"M8[{unit}]"),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
@@ -639,7 +652,7 @@ def test_tolerance_nearest(self):
result = merge_asof(left, right, on="a", direction="nearest", tolerance=1)
tm.assert_frame_equal(result, expected)
- def test_tolerance_tz(self):
+ def test_tolerance_tz(self, unit):
# GH 14844
left = pd.DataFrame(
{
@@ -648,6 +661,7 @@ def test_tolerance_tz(self):
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
+ unit=unit,
),
"value1": np.arange(5),
}
@@ -659,6 +673,7 @@ def test_tolerance_tz(self):
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
+ unit=unit,
),
"value2": list("ABCDE"),
}
@@ -672,6 +687,7 @@ def test_tolerance_tz(self):
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
+ unit=unit,
),
"value1": np.arange(5),
"value2": list("BCDEE"),
@@ -1314,22 +1330,27 @@ def test_by_mixed_tz_aware(self):
expected["value_y"] = np.array([np.nan], dtype=object)
tm.assert_frame_equal(result, expected)
- def test_timedelta_tolerance_nearest(self):
+ def test_timedelta_tolerance_nearest(self, unit):
# GH 27642
+ if unit == "s":
+ pytest.skip(
+ "This test is invalid with unit='s' because that would "
+ "round left['time']"
+ )
left = pd.DataFrame(
list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])),
columns=["time", "left"],
)
- left["time"] = pd.to_timedelta(left["time"], "ms")
+ left["time"] = pd.to_timedelta(left["time"], "ms").astype(f"m8[{unit}]")
right = pd.DataFrame(
list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])),
columns=["time", "right"],
)
- right["time"] = pd.to_timedelta(right["time"], "ms")
+ right["time"] = pd.to_timedelta(right["time"], "ms").astype(f"m8[{unit}]")
expected = pd.DataFrame(
list(
@@ -1342,7 +1363,7 @@ def test_timedelta_tolerance_nearest(self):
columns=["time", "left", "right"],
)
- expected["time"] = pd.to_timedelta(expected["time"], "ms")
+ expected["time"] = pd.to_timedelta(expected["time"], "ms").astype(f"m8[{unit}]")
result = merge_asof(
left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest"
@@ -1400,12 +1421,17 @@ def test_merge_index_column_tz(self):
)
tm.assert_frame_equal(result, expected)
- def test_left_index_right_index_tolerance(self):
+ def test_left_index_right_index_tolerance(self, unit):
# https://github.com/pandas-dev/pandas/issues/35558
- dr1 = pd.date_range(start="1/1/2020", end="1/20/2020", freq="2D") + Timedelta(
- seconds=0.4
- )
- dr2 = pd.date_range(start="1/1/2020", end="2/1/2020")
+ if unit == "s":
+ pytest.skip(
+ "This test is invalid with unit='s' because that would round dr1"
+ )
+
+ dr1 = pd.date_range(
+ start="1/1/2020", end="1/20/2020", freq="2D", unit=unit
+ ) + Timedelta(seconds=0.4).as_unit(unit)
+ dr2 = pd.date_range(start="1/1/2020", end="2/1/2020", unit=unit)
df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1))
df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2))
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50835 | 2023-01-18T18:33:22Z | 2023-01-23T19:41:26Z | 2023-01-23T19:41:26Z | 2023-05-13T15:07:15Z |
ENH: Improve performance for idxmin and idxmax | diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 1a071ab978de9..02afc888ff762 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -870,6 +870,7 @@ Performance improvements
- Performance improvement in :func:`read_html` when there are multiple tables (:issue:`49929`)
- Performance improvement in :func:`to_datetime` when using ``'%Y%m%d'`` format (:issue:`17410`)
- Performance improvement in :func:`to_datetime` when format is given or can be inferred (:issue:`50465`)
+- Performance improvement in :meth:`Series.idxmax` and :meth:`Series.idxmin` for nullable dtypes (:issue:`1`)
- Performance improvement in :func:`read_csv` when passing :func:`to_datetime` lambda-function to ``date_parser`` and inputs have mixed timezone offsetes (:issue:`35296`)
- Performance improvement in :meth:`.SeriesGroupBy.value_counts` with categorical dtype (:issue:`46202`)
- Fixed a reference leak in :func:`read_hdf` (:issue:`37441`)
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 77735add89bf7..8706694a76bff 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -36,7 +36,10 @@
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import doc
-from pandas.util._validators import validate_fillna_kwargs
+from pandas.util._validators import (
+ validate_bool_kwarg,
+ validate_fillna_kwargs,
+)
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
@@ -83,6 +86,7 @@
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.indexers import check_array_indexer
from pandas.core.ops import invalid_comparison
+from pandas.core.sorting import nargminmax
if TYPE_CHECKING:
from pandas import Series
@@ -1378,6 +1382,54 @@ def all(self, *, skipna: bool = True, **kwargs):
else:
return self.dtype.na_value
+ def argmin(self, skipna: bool = True) -> int:
+ """
+ Return the index of minimum value.
+
+ In case of multiple occurrences of the minimum value, the index
+ corresponding to the first occurrence is returned.
+
+ Parameters
+ ----------
+ skipna : bool, default True
+
+ Returns
+ -------
+ int
+
+ See Also
+ --------
+ BaseMaskedArray.argmax
+ """
+ validate_bool_kwarg(skipna, "skipna")
+ if not skipna and self._hasna:
+ raise NotImplementedError
+ return nargminmax(self, "argmin", mask=self._mask)
+
+ def argmax(self, skipna: bool = True) -> int:
+ """
+ Return the index of maximum value.
+
+ In case of multiple occurrences of the maximum value, the index
+ corresponding to the first occurrence is returned.
+
+ Parameters
+ ----------
+ skipna : bool, default True
+
+ Returns
+ -------
+ int
+
+ See Also
+ --------
+ BaseMaskedArray.argmin
+ """
+ validate_bool_kwarg(skipna, "skipna")
+ if not skipna and self._hasna:
+ raise NotImplementedError
+ return nargminmax(self, "argmax", self._mask)
+
def _accumulate(
self, name: str, *, skipna: bool = True, **kwargs
) -> BaseMaskedArray:
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index b28a9def8a7ea..68ce5e2c42a2c 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -442,7 +442,12 @@ def nargsort(
return ensure_platform_int(indexer)
-def nargminmax(values: ExtensionArray, method: str, axis: AxisInt = 0):
+def nargminmax(
+ values: ExtensionArray,
+ method: str,
+ axis: AxisInt = 0,
+ mask: npt.NDArray[np.bool_] | None = None,
+):
"""
Implementation of np.argmin/argmax but for ExtensionArray and which
handles missing values.
@@ -452,6 +457,7 @@ def nargminmax(values: ExtensionArray, method: str, axis: AxisInt = 0):
values : ExtensionArray
method : {"argmax", "argmin"}
axis : int, default 0
+ mask : npt.NDArray[np.bool_], optional
Returns
-------
@@ -460,7 +466,8 @@ def nargminmax(values: ExtensionArray, method: str, axis: AxisInt = 0):
assert method in {"argmax", "argmin"}
func = np.argmax if method == "argmax" else np.argmin
- mask = np.asarray(isna(values))
+ if mask is None:
+ mask = np.asarray(isna(values))
arr_values = values._values_for_argsort()
if arr_values.ndim > 1:
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This avoids re-computing the mask in the nargminmax algorithm | https://api.github.com/repos/pandas-dev/pandas/pulls/50834 | 2023-01-18T18:22:12Z | 2023-01-18T18:29:53Z | null | 2023-01-18T18:37:01Z |
ENH: Get rid of float cast in masked reduction ops | diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 77735add89bf7..d45fe05d52937 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -1081,12 +1081,7 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
data = self._data
mask = self._mask
- # coerce to a nan-aware float if needed
- # (we explicitly use NaN within reductions)
- if self._hasna:
- data = self.to_numpy("float64", na_value=np.nan)
-
- # median, skew, kurt, idxmin, idxmax
+ # median, skew, kurt, sem
op = getattr(nanops, f"nan{name}")
result = op(data, axis=0, skipna=skipna, mask=mask, **kwargs)
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 02372356d3fe4..c22af960927f6 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -758,15 +758,15 @@ def get_median(x):
res = np.nanmedian(x[mask])
return res
- values, mask, dtype, _, _ = _get_values(values, skipna, mask=mask)
+ values, mask, dtype, _, _ = _get_values(values, skipna, mask=mask, fill_value=0)
if not is_float_dtype(values.dtype):
try:
values = values.astype("f8")
except ValueError as err:
# e.g. "could not convert string to float: 'a'"
raise TypeError(str(err)) from err
- if mask is not None:
- values[mask] = np.nan
+ if mask is not None:
+ values[mask] = np.nan
notempty = values.size
@@ -1040,8 +1040,11 @@ def nansem(
if not is_float_dtype(values.dtype):
values = values.astype("f8")
+ if not skipna and mask is not None and mask.any():
+ return np.nan
+
count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
- var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof)
+ var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)
return np.sqrt(var) / np.sqrt(count)
@@ -1222,6 +1225,8 @@ def nanskew(
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
+ elif not skipna and mask is not None and mask.any():
+ return np.nan
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
@@ -1310,6 +1315,8 @@ def nankurt(
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
+ elif not skipna and mask is not None and mask.any():
+ return np.nan
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
| - [x] closes #30436 (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Shouldn't be user visible | https://api.github.com/repos/pandas-dev/pandas/pulls/50833 | 2023-01-18T18:08:31Z | 2023-01-19T20:40:19Z | 2023-01-19T20:40:19Z | 2023-01-19T20:41:06Z |
DOC fix link in bug report | diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml
index d9330e396ed12..4e1bc8f61d04e 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yaml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yaml
@@ -17,8 +17,8 @@ body:
[latest version](https://pandas.pydata.org/docs/whatsnew/index.html) of pandas.
required: true
- label: >
- I have confirmed this bug exists on the [main branch]
- (https://pandas.pydata.org/docs/dev/getting_started/install.html#installing-the-development-version-of-pandas)
+ I have confirmed this bug exists on the
+ [main branch](https://pandas.pydata.org/docs/dev/getting_started/install.html#installing-the-development-version-of-pandas)
of pandas.
- type: textarea
id: example
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50831 | 2023-01-18T16:32:56Z | 2023-01-18T16:48:20Z | 2023-01-18T16:48:20Z | 2023-01-18T18:08:18Z |
CLN remove unneccessary code from array_strptime which doesn't spark joy | diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index 69878625295d6..3ca87f8680b53 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -183,52 +183,51 @@ def array_strptime(
bint found_naive = False
bint found_tz = False
tzinfo tz_out = None
- bint iso_format = fmt is not None and format_is_iso(fmt)
+ bint iso_format = format_is_iso(fmt)
NPY_DATETIMEUNIT out_bestunit
int out_local = 0, out_tzoffset = 0
assert is_raise or is_ignore or is_coerce
- if fmt is not None:
- if "%W" in fmt or "%U" in fmt:
- if "%Y" not in fmt and "%y" not in fmt:
- raise ValueError("Cannot use '%W' or '%U' without day and year")
- if "%A" not in fmt and "%a" not in fmt and "%w" not in fmt:
- raise ValueError("Cannot use '%W' or '%U' without day and year")
- elif "%Z" in fmt and "%z" in fmt:
- raise ValueError("Cannot parse both %Z and %z")
- elif "%j" in fmt and "%G" in fmt:
- raise ValueError("Day of the year directive '%j' is not "
- "compatible with ISO year directive '%G'. "
- "Use '%Y' instead.")
- elif "%G" in fmt and (
- "%V" not in fmt
- or not (
- "%A" in fmt
- or "%a" in fmt
- or "%w" in fmt
- or "%u" in fmt
- )
- ):
- raise ValueError("ISO year directive '%G' must be used with "
- "the ISO week directive '%V' and a weekday "
- "directive '%A', '%a', '%w', or '%u'.")
- elif "%V" in fmt and "%Y" in fmt:
- raise ValueError("ISO week directive '%V' is incompatible with "
- "the year directive '%Y'. Use the ISO year "
- "'%G' instead.")
- elif "%V" in fmt and (
- "%G" not in fmt
- or not (
- "%A" in fmt
- or "%a" in fmt
- or "%w" in fmt
- or "%u" in fmt
- )
- ):
- raise ValueError("ISO week directive '%V' must be used with "
- "the ISO year directive '%G' and a weekday "
- "directive '%A', '%a', '%w', or '%u'.")
+ if "%W" in fmt or "%U" in fmt:
+ if "%Y" not in fmt and "%y" not in fmt:
+ raise ValueError("Cannot use '%W' or '%U' without day and year")
+ if "%A" not in fmt and "%a" not in fmt and "%w" not in fmt:
+ raise ValueError("Cannot use '%W' or '%U' without day and year")
+ elif "%Z" in fmt and "%z" in fmt:
+ raise ValueError("Cannot parse both %Z and %z")
+ elif "%j" in fmt and "%G" in fmt:
+ raise ValueError("Day of the year directive '%j' is not "
+ "compatible with ISO year directive '%G'. "
+ "Use '%Y' instead.")
+ elif "%G" in fmt and (
+ "%V" not in fmt
+ or not (
+ "%A" in fmt
+ or "%a" in fmt
+ or "%w" in fmt
+ or "%u" in fmt
+ )
+ ):
+ raise ValueError("ISO year directive '%G' must be used with "
+ "the ISO week directive '%V' and a weekday "
+ "directive '%A', '%a', '%w', or '%u'.")
+ elif "%V" in fmt and "%Y" in fmt:
+ raise ValueError("ISO week directive '%V' is incompatible with "
+ "the year directive '%Y'. Use the ISO year "
+ "'%G' instead.")
+ elif "%V" in fmt and (
+ "%G" not in fmt
+ or not (
+ "%A" in fmt
+ or "%a" in fmt
+ or "%w" in fmt
+ or "%u" in fmt
+ )
+ ):
+ raise ValueError("ISO week directive '%V' must be used with "
+ "the ISO year directive '%G' and a weekday "
+ "directive '%A', '%a', '%w', or '%u'.")
global _TimeRE_cache, _regex_cache
with _cache_lock:
| `fmt` can't be `None` so there's more complexity here than need be
If reviewing, I'd suggest using the "hide whitespace" option | https://api.github.com/repos/pandas-dev/pandas/pulls/50830 | 2023-01-18T16:28:31Z | 2023-01-18T19:23:20Z | 2023-01-18T19:23:20Z | 2023-01-18T19:23:28Z |
PERF: datetime parsing with Q | diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 9e593ec64f7d2..1b81d53c09e7e 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -472,16 +472,15 @@ cpdef bint _does_string_look_like_datetime(str py_string):
return True
-cdef object _parse_dateabbr_string(object date_string, datetime default,
+cdef object _parse_dateabbr_string(str date_string, datetime default,
str freq=None):
+ # special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1
cdef:
datetime ret
# year initialized to prevent compiler warnings
int year = -1, quarter = -1, month
Py_ssize_t date_len
-
- # special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1
- assert isinstance(date_string, str)
+ const char* buf
if date_string in nat_strings:
return NaT, ""
@@ -498,10 +497,11 @@ cdef object _parse_dateabbr_string(object date_string, datetime default,
pass
if 4 <= date_len <= 7:
+ buf = get_c_string_buf_and_size(date_string, &date_len)
try:
i = date_string.index("Q", 1, 6)
if i == 1:
- quarter = int(date_string[0])
+ quarter = _parse_1digit(buf) # i.e. int(date_string[0])
if date_len == 4 or (date_len == 5
and date_string[i + 1] == "-"):
# r'(\d)Q-?(\d\d)')
@@ -516,7 +516,8 @@ cdef object _parse_dateabbr_string(object date_string, datetime default,
# r'(\d\d)-?Q(\d)'
if date_len == 4 or (date_len == 5
and date_string[i - 1] == "-"):
- quarter = int(date_string[-1])
+ # i.e. quarter = int(date_string[-1])
+ quarter = _parse_1digit(buf + date_len - 1)
year = 2000 + int(date_string[:2])
else:
raise ValueError
@@ -524,7 +525,8 @@ cdef object _parse_dateabbr_string(object date_string, datetime default,
if date_len == 6 or (date_len == 7
and date_string[i - 1] == "-"):
# r'(\d\d\d\d)-?Q(\d)'
- quarter = int(date_string[-1])
+ # i.e. quarter = int(date_string[-1])
+ quarter = _parse_1digit(buf + date_len - 1)
year = int(date_string[:4])
else:
raise ValueError
| ```
In [3]: %timeit pd.Timestamp("2014Q1")
3.04 µs ± 63.6 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each) # <- main
2.8 µs ± 19.2 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each) # <- PR
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/50828 | 2023-01-18T16:02:50Z | 2023-01-18T19:19:53Z | 2023-01-18T19:19:53Z | 2023-01-18T19:21:05Z |
Manual backport fix github quota | diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml
index 17ebed5a8c1e6..13da56806de6e 100644
--- a/.github/workflows/docbuild-and-upload.yml
+++ b/.github/workflows/docbuild-and-upload.yml
@@ -45,12 +45,6 @@ jobs:
- name: Build Pandas
uses: ./.github/actions/build_pandas
- - name: Set up maintainers cache
- uses: actions/cache@v3
- with:
- path: maintainers.json
- key: maintainers
-
- name: Build website
run: python web/pandas_web.py web/pandas --target-path=web/build
diff --git a/web/pandas/about/team.md b/web/pandas/about/team.md
index 261d577b2abc1..bdd5d5d2b2468 100644
--- a/web/pandas/about/team.md
+++ b/web/pandas/about/team.md
@@ -9,7 +9,8 @@ If you want to support pandas development, you can find information in the [dona
## Active maintainers
<div class="card-group maintainers">
- {% for person in maintainers.active_with_github_info %}
+ {% for username in maintainers.active %}
+ {% set person = maintainers.github_info.get(username) %}
<div class="card">
<img class="card-img-top" alt="" src="{{ person.avatar_url }}"/>
<div class="card-body">
@@ -63,7 +64,8 @@ The project governance is available in the [project governance page](governance.
## Inactive maintainers
<ul>
- {% for person in maintainers.inactive_with_github_info %}
+ {% for username in maintainers.inactive %}
+ {% set person = maintainers.github_info.get(username) %}
<li>
<a href="{{ person.blog or person.html_url }}">
{{ person.name or person.login }}
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index 16e1357d405a0..85dee1d114800 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -1,10 +1,10 @@
main:
templates_path: _templates
base_template: "layout.html"
+ production_url: "https://pandas.pydata.org/"
ignore:
- _templates/layout.html
- config.yml
- - try.md # the binder page will be added later
github_repo_url: pandas-dev/pandas
context_preprocessors:
- pandas_web.Preprocessors.current_year
diff --git a/web/pandas_web.py b/web/pandas_web.py
index 3e5b089cab64a..1a2bc45bd87e0 100755
--- a/web/pandas_web.py
+++ b/web/pandas_web.py
@@ -158,35 +158,39 @@ def maintainers_add_info(context):
Given the active maintainers defined in the yaml file, it fetches
the GitHub user information for them.
"""
- timestamp = time.time()
-
- cache_file = pathlib.Path("maintainers.json")
- if cache_file.is_file():
- with open(cache_file) as f:
- context["maintainers"] = json.load(f)
- # refresh cache after 1 hour
- if (timestamp - context["maintainers"]["timestamp"]) < 3_600:
- return context
-
- context["maintainers"]["timestamp"] = timestamp
-
repeated = set(context["maintainers"]["active"]) & set(
context["maintainers"]["inactive"]
)
if repeated:
raise ValueError(f"Maintainers {repeated} are both active and inactive")
- for kind in ("active", "inactive"):
- context["maintainers"][f"{kind}_with_github_info"] = []
- for user in context["maintainers"][kind]:
- resp = requests.get(f"https://api.github.com/users/{user}")
- if context["ignore_io_errors"] and resp.status_code == 403:
- return context
- resp.raise_for_status()
- context["maintainers"][f"{kind}_with_github_info"].append(resp.json())
+ maintainers_info = {}
+ for user in (
+ context["maintainers"]["active"] + context["maintainers"]["inactive"]
+ ):
+ resp = requests.get(f"https://api.github.com/users/{user}")
+ if resp.status_code == 403:
+ sys.stderr.write(
+ "WARN: GitHub API quota exceeded when fetching maintainers\n"
+ )
+ # if we exceed github api quota, we use the github info
+ # of maintainers saved with the website
+ resp_bkp = requests.get(
+ context["main"]["production_url"] + "maintainers.json"
+ )
+ resp_bkp.raise_for_status()
+ maintainers_info = resp_bkp.json()
+ break
+
+ resp.raise_for_status()
+ maintainers_info[user] = resp.json()
- with open(cache_file, "w") as f:
- json.dump(context["maintainers"], f)
+ context["maintainers"]["github_info"] = maintainers_info
+
+ # save the data fetched from github to use it in case we exceed
+ # git github api quota in the future
+ with open(pathlib.Path(context["target_path"]) / "maintainers.json", "w") as f:
+ json.dump(maintainers_info, f)
return context
@@ -196,11 +200,19 @@ def home_add_releases(context):
github_repo_url = context["main"]["github_repo_url"]
resp = requests.get(f"https://api.github.com/repos/{github_repo_url}/releases")
- if context["ignore_io_errors"] and resp.status_code == 403:
- return context
- resp.raise_for_status()
+ if resp.status_code == 403:
+ sys.stderr.write("WARN: GitHub API quota exceeded when fetching releases\n")
+ resp_bkp = requests.get(context["main"]["production_url"] + "releases.json")
+ resp_bkp.raise_for_status()
+ releases = resp_bkp.json()
+ else:
+ resp.raise_for_status()
+ releases = resp.json()
- for release in resp.json():
+ with open(pathlib.Path(context["target_path"]) / "releases.json", "w") as f:
+ json.dump(releases, f, default=datetime.datetime.isoformat)
+
+ for release in releases:
if release["prerelease"]:
continue
published = datetime.datetime.strptime(
@@ -218,6 +230,7 @@ def home_add_releases(context):
),
}
)
+
return context
@staticmethod
@@ -264,12 +277,20 @@ def roadmap_pdeps(context):
"https://api.github.com/search/issues?"
f"q=is:pr is:open label:PDEP repo:{github_repo_url}"
)
- if context["ignore_io_errors"] and resp.status_code == 403:
- return context
- resp.raise_for_status()
+ if resp.status_code == 403:
+ sys.stderr.write("WARN: GitHub API quota exceeded when fetching pdeps\n")
+ resp_bkp = requests.get(context["main"]["production_url"] + "pdeps.json")
+ resp_bkp.raise_for_status()
+ pdeps = resp_bkp.json()
+ else:
+ resp.raise_for_status()
+ pdeps = resp.json()
+
+ with open(pathlib.Path(context["target_path"]) / "pdeps.json", "w") as f:
+ json.dump(pdeps, f)
- for pdep in resp.json()["items"]:
- context["pdeps"]["under_discussion"].append(
+ for pdep in pdeps["items"]:
+ context["pdeps"]["Under discussion"].append(
{"title": pdep["title"], "url": pdep["url"]}
)
@@ -302,7 +323,7 @@ def get_callable(obj_as_str: str) -> object:
return obj
-def get_context(config_fname: str, ignore_io_errors: bool, **kwargs):
+def get_context(config_fname: str, **kwargs):
"""
Load the config yaml as the base context, and enrich it with the
information added by the context preprocessors defined in the file.
@@ -311,7 +332,6 @@ def get_context(config_fname: str, ignore_io_errors: bool, **kwargs):
context = yaml.safe_load(f)
context["source_path"] = os.path.dirname(config_fname)
- context["ignore_io_errors"] = ignore_io_errors
context.update(kwargs)
preprocessors = (
@@ -349,7 +369,9 @@ def extend_base_template(content: str, base_template: str) -> str:
def main(
- source_path: str, target_path: str, base_url: str, ignore_io_errors: bool
+ source_path: str,
+ target_path: str,
+ base_url: str,
) -> int:
"""
Copy every file in the source directory to the target directory.
@@ -363,7 +385,7 @@ def main(
os.makedirs(target_path, exist_ok=True)
sys.stderr.write("Generating context...\n")
- context = get_context(config_fname, ignore_io_errors, base_url=base_url)
+ context = get_context(config_fname, base_url=base_url, target_path=target_path)
sys.stderr.write("Context generated\n")
templates_path = os.path.join(source_path, context["main"]["templates_path"])
@@ -407,15 +429,5 @@ def main(
parser.add_argument(
"--base-url", default="", help="base url where the website is served from"
)
- parser.add_argument(
- "--ignore-io-errors",
- action="store_true",
- help="do not fail if errors happen when fetching "
- "data from http sources, and those fail "
- "(mostly useful to allow github quota errors "
- "when running the script locally)",
- )
args = parser.parse_args()
- sys.exit(
- main(args.source_path, args.target_path, args.base_url, args.ignore_io_errors)
- )
+ sys.exit(main(args.source_path, args.target_path, args.base_url))
| xref #50811 | https://api.github.com/repos/pandas-dev/pandas/pulls/50827 | 2023-01-18T15:26:01Z | 2023-01-18T17:48:10Z | 2023-01-18T17:48:10Z | 2023-01-18T17:48:11Z |
DEPR: remove Int/Uint/Float64Index from pandas/tests/indexes/ranges | diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 73e4a51ca3e7c..0ff733ab51b85 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -47,11 +47,7 @@
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
-from pandas.core.indexes.numeric import (
- Float64Index,
- Int64Index,
- NumericIndex,
-)
+from pandas.core.indexes.numeric import NumericIndex
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
@@ -64,8 +60,8 @@ class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
- RangeIndex is a memory-saving special case of Int64Index limited to
- representing monotonic ranges. Using RangeIndex may in some instances
+ RangeIndex is a memory-saving special case of an Index limited to representing
+ monotonic ranges with a 64-bit dtype. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
@@ -97,7 +93,6 @@ class RangeIndex(NumericIndex):
See Also
--------
Index : The base pandas Index type.
- Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
@@ -185,7 +180,7 @@ def _simple_new( # type: ignore[override]
# --------------------------------------------------------------------
- # error: Return type "Type[Int64Index]" of "_constructor" incompatible with return
+ # error: Return type "Type[NumericIndex]" of "_constructor" incompatible with return
# type "Type[RangeIndex]" in supertype "Index"
@cache_readonly
def _constructor(self) -> type[NumericIndex]: # type: ignore[override]
@@ -331,7 +326,7 @@ def inferred_type(self) -> str:
# --------------------------------------------------------------------
# Indexing Methods
- @doc(Int64Index.get_loc)
+ @doc(NumericIndex.get_loc)
def get_loc(self, key):
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
@@ -377,32 +372,32 @@ def _get_indexer(
def tolist(self) -> list[int]:
return list(self._range)
- @doc(Int64Index.__iter__)
+ @doc(NumericIndex.__iter__)
def __iter__(self) -> Iterator[int]:
yield from self._range
- @doc(Int64Index._shallow_copy)
+ @doc(NumericIndex._shallow_copy)
def _shallow_copy(self, values, name: Hashable = no_default):
name = self.name if name is no_default else name
if values.dtype.kind == "f":
- return Float64Index(values, name=name)
+ return NumericIndex(values, name=name, dtype=np.float64)
# GH 46675 & 43885: If values is equally spaced, return a
- # more memory-compact RangeIndex instead of Int64Index
+ # more memory-compact RangeIndex instead of Index with 64-bit dtype
unique_diffs = unique_deltas(values)
if len(unique_diffs) == 1 and unique_diffs[0] != 0:
diff = unique_diffs[0]
new_range = range(values[0], values[-1] + diff, diff)
return type(self)._simple_new(new_range, name=name)
else:
- return Int64Index._simple_new(values, name=name)
+ return NumericIndex._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
result._cache = self._cache
return result
- @doc(Int64Index.copy)
+ @doc(NumericIndex.copy)
def copy(self, name: Hashable = None, deep: bool = False):
name = self._validate_names(name=name, deep=deep)[0]
new_index = self._rename(name=name)
@@ -517,7 +512,6 @@ def _intersection(self, other: Index, sort: bool = False):
# caller is responsible for checking self and other are both non-empty
if not isinstance(other, RangeIndex):
- # Int64Index
return super()._intersection(other, sort=sort)
first = self._range[::-1] if self.step < 0 else self._range
@@ -604,10 +598,10 @@ def _union(self, other: Index, sort):
sort : False or None, default None
Whether to sort (monotonically increasing) the resulting index.
``sort=None`` returns a ``RangeIndex`` if possible or a sorted
- ``Int64Index`` if not.
+ ``Index`` with a int64 dtype if not.
``sort=False`` can return a ``RangeIndex`` if self is monotonically
increasing and other is fully contained in self. Otherwise, returns
- an unsorted ``Int64Index``
+ an unsorted ``Index`` with an int64 dtype.
Returns
-------
@@ -819,9 +813,9 @@ def _concat(self, indexes: list[Index], name: Hashable) -> Index:
Overriding parent method for the case of all RangeIndex instances.
When all members of "indexes" are of type RangeIndex: result will be
- RangeIndex if possible, Int64Index otherwise. E.g.:
+ RangeIndex if possible, Index with a int64 dtype otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
- indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
+ indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Index([0,1,2,4,5], dtype='int64')
"""
if not all(isinstance(x, RangeIndex) for x in indexes):
return super()._concat(indexes, name)
@@ -848,7 +842,7 @@ def _concat(self, indexes: list[Index], name: Hashable) -> Index:
# First non-empty index had only one element
if rng.start == start:
values = np.concatenate([x._values for x in rng_indexes])
- result = Int64Index(values)
+ result = self._constructor(values)
return result.rename(name)
step = rng.start - start
@@ -857,7 +851,9 @@ def _concat(self, indexes: list[Index], name: Hashable) -> Index:
next_ is not None and rng.start != next_
)
if non_consecutive:
- result = Int64Index(np.concatenate([x._values for x in rng_indexes]))
+ result = self._constructor(
+ np.concatenate([x._values for x in rng_indexes])
+ )
return result.rename(name)
if step is not None:
@@ -905,7 +901,6 @@ def __getitem__(self, key):
"and integer or boolean "
"arrays are valid indices"
)
- # fall back to Int64Index
return super().__getitem__(key)
def _getitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex:
@@ -1010,15 +1005,14 @@ def _arith_method(self, other, op):
res_name = ops.get_op_result_name(self, other)
result = type(self)(rstart, rstop, rstep, name=res_name)
- # for compat with numpy / Int64Index
+ # for compat with numpy / Index with int64 dtype
# even if we can represent as a RangeIndex, return
- # as a Float64Index if we have float-like descriptors
+ # as a float64 Index if we have float-like descriptors
if not all(is_integer(x) for x in [rstart, rstop, rstep]):
result = result.astype("float64")
return result
except (ValueError, TypeError, ZeroDivisionError):
- # Defer to Int64Index implementation
# test_arithmetic_explicit_conversions
return super()._arith_method(other, op)
diff --git a/pandas/tests/indexes/ranges/test_join.py b/pandas/tests/indexes/ranges/test_join.py
index c3c2560693d3d..958dd6aa9a563 100644
--- a/pandas/tests/indexes/ranges/test_join.py
+++ b/pandas/tests/indexes/ranges/test_join.py
@@ -1,24 +1,25 @@
import numpy as np
+from pandas.core.dtypes.common import is_int64_dtype
+
from pandas import (
Index,
RangeIndex,
)
import pandas._testing as tm
-from pandas.core.indexes.api import Int64Index
class TestJoin:
def test_join_outer(self):
- # join with Int64Index
+ # join with Index[int64]
index = RangeIndex(start=0, stop=20, step=2)
- other = Int64Index(np.arange(25, 14, -1))
+ other = Index(np.arange(25, 14, -1, dtype=np.int64))
res, lidx, ridx = index.join(other, how="outer", return_indexers=True)
noidx_res = index.join(other, how="outer")
tm.assert_index_equal(res, noidx_res)
- eres = Int64Index(
+ eres = Index(
[0, 2, 4, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
)
elidx = np.array(
@@ -30,9 +31,9 @@ def test_join_outer(self):
dtype=np.intp,
)
- assert isinstance(res, Int64Index)
+ assert isinstance(res, Index) and is_int64_dtype(res.dtype)
assert not isinstance(res, RangeIndex)
- tm.assert_index_equal(res, eres)
+ tm.assert_index_equal(res, eres, exact=True)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
@@ -43,7 +44,7 @@ def test_join_outer(self):
noidx_res = index.join(other, how="outer")
tm.assert_index_equal(res, noidx_res)
- assert isinstance(res, Int64Index)
+ assert isinstance(res, Index) and res.dtype == np.int64
assert not isinstance(res, RangeIndex)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
@@ -52,7 +53,7 @@ def test_join_outer(self):
def test_join_inner(self):
# Join with non-RangeIndex
index = RangeIndex(start=0, stop=20, step=2)
- other = Int64Index(np.arange(25, 14, -1))
+ other = Index(np.arange(25, 14, -1, dtype=np.int64))
res, lidx, ridx = index.join(other, how="inner", return_indexers=True)
@@ -62,7 +63,7 @@ def test_join_inner(self):
lidx = lidx.take(ind)
ridx = ridx.take(ind)
- eres = Int64Index([16, 18])
+ eres = Index([16, 18])
elidx = np.array([8, 9], dtype=np.intp)
eridx = np.array([9, 7], dtype=np.intp)
@@ -82,9 +83,9 @@ def test_join_inner(self):
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
- # Join with Int64Index
+ # Join with Index[int64]
index = RangeIndex(start=0, stop=20, step=2)
- other = Int64Index(np.arange(25, 14, -1))
+ other = Index(np.arange(25, 14, -1, dtype=np.int64))
res, lidx, ridx = index.join(other, how="left", return_indexers=True)
eres = index
@@ -96,7 +97,7 @@ def test_join_left(self):
tm.assert_numpy_array_equal(ridx, eridx)
# Join withRangeIndex
- other = Int64Index(np.arange(25, 14, -1))
+ other = Index(np.arange(25, 14, -1, dtype=np.int64))
res, lidx, ridx = index.join(other, how="left", return_indexers=True)
@@ -106,15 +107,15 @@ def test_join_left(self):
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_right(self):
- # Join with Int64Index
+ # Join with Index[int64]
index = RangeIndex(start=0, stop=20, step=2)
- other = Int64Index(np.arange(25, 14, -1))
+ other = Index(np.arange(25, 14, -1, dtype=np.int64))
res, lidx, ridx = index.join(other, how="right", return_indexers=True)
eres = other
elidx = np.array([-1, -1, -1, -1, -1, -1, -1, 9, -1, 8, -1], dtype=np.intp)
- assert isinstance(other, Int64Index)
+ assert isinstance(other, Index) and other.dtype == np.int64
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
assert ridx is None
@@ -164,7 +165,7 @@ def test_join_non_unique(self):
res, lidx, ridx = index.join(other, return_indexers=True)
- eres = Int64Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18])
+ eres = Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18])
elidx = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp)
eridx = np.array([-1, -1, 0, 1, -1, -1, -1, -1, -1, -1, -1], dtype=np.intp)
diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py
index e534147c891d2..d255dc748b7dc 100644
--- a/pandas/tests/indexes/ranges/test_range.py
+++ b/pandas/tests/indexes/ranges/test_range.py
@@ -4,20 +4,15 @@
from pandas.core.dtypes.common import ensure_platform_int
import pandas as pd
-import pandas._testing as tm
-from pandas.core.indexes.api import (
- Float64Index,
+from pandas import (
Index,
- Int64Index,
RangeIndex,
)
+import pandas._testing as tm
from pandas.tests.indexes.common import NumericBase
# aliases to make some tests easier to read
RI = RangeIndex
-I64 = Int64Index
-F64 = Float64Index
-OI = Index
class TestRangeIndex(NumericBase):
@@ -111,7 +106,7 @@ def test_insert(self):
tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]), exact="equiv")
# GH 18295 (test missing)
- expected = Float64Index([0, np.nan, 1, 2, 3, 4])
+ expected = Index([0, np.nan, 1, 2, 3, 4], dtype=np.float64)
for na in [np.nan, None, pd.NA]:
result = RangeIndex(5).insert(1, na)
tm.assert_index_equal(result, expected)
@@ -379,7 +374,7 @@ def test_nbytes(self):
# memory savings vs int index
idx = RangeIndex(0, 1000)
- assert idx.nbytes < Int64Index(idx._values).nbytes / 10
+ assert idx.nbytes < Index(idx._values).nbytes / 10
# constant memory usage
i2 = RangeIndex(0, 10)
@@ -530,16 +525,16 @@ def test_len_specialised(self, step):
([RI(-4, -8), RI(-8, -12)], RI(0, 0)),
([RI(-4, -8), RI(3, -4)], RI(0, 0)),
([RI(-4, -8), RI(3, 5)], RI(3, 5)),
- ([RI(-4, -2), RI(3, 5)], I64([-4, -3, 3, 4])),
+ ([RI(-4, -2), RI(3, 5)], Index([-4, -3, 3, 4])),
([RI(-2), RI(3, 5)], RI(3, 5)),
- ([RI(2), RI(2)], I64([0, 1, 0, 1])),
+ ([RI(2), RI(2)], Index([0, 1, 0, 1])),
([RI(2), RI(2, 5), RI(5, 8, 4)], RI(0, 6)),
- ([RI(2), RI(3, 5), RI(5, 8, 4)], I64([0, 1, 3, 4, 5])),
+ ([RI(2), RI(3, 5), RI(5, 8, 4)], Index([0, 1, 3, 4, 5])),
([RI(-2, 2), RI(2, 5), RI(5, 8, 4)], RI(-2, 6)),
- ([RI(3), OI([-1, 3, 15])], OI([0, 1, 2, -1, 3, 15])),
- ([RI(3), OI([-1, 3.1, 15.0])], OI([0, 1, 2, -1, 3.1, 15.0])),
- ([RI(3), OI(["a", None, 14])], OI([0, 1, 2, "a", None, 14])),
- ([RI(3, 1), OI(["a", None, 14])], OI(["a", None, 14])),
+ ([RI(3), Index([-1, 3, 15])], Index([0, 1, 2, -1, 3, 15])),
+ ([RI(3), Index([-1, 3.1, 15.0])], Index([0, 1, 2, -1, 3.1, 15.0])),
+ ([RI(3), Index(["a", None, 14])], Index([0, 1, 2, "a", None, 14])),
+ ([RI(3, 1), Index(["a", None, 14])], Index(["a", None, 14])),
]
)
def appends(self, request):
diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py
index 29ba5a91498db..d417b8b743dc5 100644
--- a/pandas/tests/indexes/ranges/test_setops.py
+++ b/pandas/tests/indexes/ranges/test_setops.py
@@ -11,12 +11,11 @@
import numpy as np
import pytest
-import pandas._testing as tm
-from pandas.core.indexes.api import (
+from pandas import (
Index,
- Int64Index,
RangeIndex,
)
+import pandas._testing as tm
class TestRangeIndexSetOps:
@@ -62,7 +61,7 @@ def test_intersection_empty(self, sort, names):
tm.assert_index_equal(result, index[:0].rename(names[2]), exact=True)
def test_intersection(self, sort):
- # intersect with Int64Index
+ # intersect with Index with dtype int64
index = RangeIndex(start=0, stop=20, step=2)
other = Index(np.arange(1, 6))
result = index.intersection(other, sort=sort)
@@ -133,7 +132,7 @@ def test_intersection_non_overlapping_gcd(self, sort, names):
tm.assert_index_equal(result, expected)
def test_union_noncomparable(self, sort):
- # corner case, non-Int64Index
+ # corner case, Index with non-int64 dtype
index = RangeIndex(start=0, stop=20, step=2)
other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object)
result = index.union(other, sort=sort)
@@ -181,25 +180,25 @@ def test_union_noncomparable(self, sort):
RangeIndex(0, 10, 2),
RangeIndex(1, 10, 2),
RangeIndex(0, 10, 1),
- Int64Index(list(range(0, 10, 2)) + list(range(1, 10, 2))),
+ Index(list(range(0, 10, 2)) + list(range(1, 10, 2))),
),
(
RangeIndex(0, 11, 2),
RangeIndex(1, 12, 2),
RangeIndex(0, 12, 1),
- Int64Index(list(range(0, 11, 2)) + list(range(1, 12, 2))),
+ Index(list(range(0, 11, 2)) + list(range(1, 12, 2))),
),
(
RangeIndex(0, 21, 4),
RangeIndex(-2, 24, 4),
RangeIndex(-2, 24, 2),
- Int64Index(list(range(0, 21, 4)) + list(range(-2, 24, 4))),
+ Index(list(range(0, 21, 4)) + list(range(-2, 24, 4))),
),
(
RangeIndex(0, -20, -2),
RangeIndex(-1, -21, -2),
RangeIndex(-19, 1, 1),
- Int64Index(list(range(0, -20, -2)) + list(range(-1, -21, -2))),
+ Index(list(range(0, -20, -2)) + list(range(-1, -21, -2))),
),
(
RangeIndex(0, 100, 5),
@@ -211,13 +210,13 @@ def test_union_noncomparable(self, sort):
RangeIndex(0, -100, -5),
RangeIndex(5, -100, -20),
RangeIndex(-95, 10, 5),
- Int64Index(list(range(0, -100, -5)) + [5]),
+ Index(list(range(0, -100, -5)) + [5]),
),
(
RangeIndex(0, -11, -1),
RangeIndex(1, -12, -4),
RangeIndex(-11, 2, 1),
- Int64Index(list(range(0, -11, -1)) + [1, -11]),
+ Index(list(range(0, -11, -1)) + [1, -11]),
),
(RangeIndex(0), RangeIndex(0), RangeIndex(0), RangeIndex(0)),
(
@@ -236,7 +235,7 @@ def test_union_noncomparable(self, sort):
RangeIndex(0, -100, -2),
RangeIndex(-100, 50, 102),
RangeIndex(-100, 4, 2),
- Int64Index(list(range(0, -100, -2)) + [-100, 2]),
+ Index(list(range(0, -100, -2)) + [-100, 2]),
),
(
RangeIndex(0, -100, -1),
@@ -254,25 +253,25 @@ def test_union_noncomparable(self, sort):
RangeIndex(0, 10, 5),
RangeIndex(-5, -6, -20),
RangeIndex(-5, 10, 5),
- Int64Index([0, 5, -5]),
+ Index([0, 5, -5]),
),
(
RangeIndex(0, 3, 1),
RangeIndex(4, 5, 1),
- Int64Index([0, 1, 2, 4]),
- Int64Index([0, 1, 2, 4]),
+ Index([0, 1, 2, 4]),
+ Index([0, 1, 2, 4]),
),
(
RangeIndex(0, 10, 1),
- Int64Index([]),
+ Index([], dtype=np.int64),
RangeIndex(0, 10, 1),
RangeIndex(0, 10, 1),
),
(
RangeIndex(0),
- Int64Index([1, 5, 6]),
- Int64Index([1, 5, 6]),
- Int64Index([1, 5, 6]),
+ Index([1, 5, 6]),
+ Index([1, 5, 6]),
+ Index([1, 5, 6]),
),
# GH 43885
(
@@ -292,7 +291,7 @@ def test_union_sorted(self, idx1, idx2, expected_sorted, expected_notsorted):
tm.assert_index_equal(res1, expected_notsorted, exact=True)
res2 = idx2.union(idx1, sort=None)
- res3 = Int64Index(idx1._values, name=idx1.name).union(idx2, sort=None)
+ res3 = Index(idx1._values, name=idx1.name).union(idx2, sort=None)
tm.assert_index_equal(res2, expected_sorted, exact=True)
tm.assert_index_equal(res3, expected_sorted, exact="equiv")
@@ -302,7 +301,7 @@ def test_union_same_step_misaligned(self):
right = RangeIndex(range(1, 21, 4))
result = left.union(right)
- expected = Int64Index([0, 1, 4, 5, 8, 9, 12, 13, 16, 17])
+ expected = Index([0, 1, 4, 5, 8, 9, 12, 13, 16, 17])
tm.assert_index_equal(result, expected, exact=True)
def test_difference(self):
@@ -338,8 +337,8 @@ def test_difference(self):
tm.assert_index_equal(result, obj[:-3][::-1], exact=True)
result = obj.difference(obj[2:6])
- expected = Int64Index([1, 2, 7, 8, 9], name="foo")
- tm.assert_index_equal(result, expected)
+ expected = Index([1, 2, 7, 8, 9], name="foo")
+ tm.assert_index_equal(result, expected, exact=True)
def test_difference_sort(self):
# GH#44085 ensure we respect the sort keyword
@@ -402,25 +401,25 @@ def test_difference_interior_non_preserving(self):
other = idx[3:4]
result = idx.difference(other)
- expected = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 9])
+ expected = Index([0, 1, 2, 4, 5, 6, 7, 8, 9])
tm.assert_index_equal(result, expected, exact=True)
# case with other.step / self.step > 2
other = idx[::3]
result = idx.difference(other)
- expected = Int64Index([1, 2, 4, 5, 7, 8])
+ expected = Index([1, 2, 4, 5, 7, 8])
tm.assert_index_equal(result, expected, exact=True)
# cases with only reaching one end of left
obj = Index(range(20))
other = obj[:10:2]
result = obj.difference(other)
- expected = Int64Index([1, 3, 5, 7, 9] + list(range(10, 20)))
+ expected = Index([1, 3, 5, 7, 9] + list(range(10, 20)))
tm.assert_index_equal(result, expected, exact=True)
other = obj[1:11:2]
result = obj.difference(other)
- expected = Int64Index([0, 2, 4, 6, 8, 10] + list(range(11, 20)))
+ expected = Index([0, 2, 4, 6, 8, 10] + list(range(11, 20)))
tm.assert_index_equal(result, expected, exact=True)
def test_symmetric_difference(self):
@@ -436,8 +435,8 @@ def test_symmetric_difference(self):
tm.assert_index_equal(result, left.rename(None))
result = left[:-2].symmetric_difference(left[2:])
- expected = Int64Index([1, 2, 8, 9], name="foo")
- tm.assert_index_equal(result, expected)
+ expected = Index([1, 2, 8, 9], name="foo")
+ tm.assert_index_equal(result, expected, exact=True)
right = RangeIndex.from_range(range(10, 15))
@@ -446,8 +445,8 @@ def test_symmetric_difference(self):
tm.assert_index_equal(result, expected)
result = left.symmetric_difference(right[1:])
- expected = Int64Index([1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14])
- tm.assert_index_equal(result, expected)
+ expected = Index([1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14])
+ tm.assert_index_equal(result, expected, exact=True)
def assert_range_or_not_is_rangelike(index):
@@ -470,7 +469,7 @@ def assert_range_or_not_is_rangelike(index):
)
def test_range_difference(start1, stop1, step1, start2, stop2, step2):
# test that
- # a) we match Int64Index.difference and
+ # a) we match Index[int64].difference and
# b) we return RangeIndex whenever it is possible to do so.
assume(step1 != 0)
assume(step2 != 0)
@@ -481,11 +480,14 @@ def test_range_difference(start1, stop1, step1, start2, stop2, step2):
result = left.difference(right, sort=None)
assert_range_or_not_is_rangelike(result)
- alt = Int64Index(left).difference(Int64Index(right), sort=None)
+ left_int64 = Index(left.to_numpy())
+ right_int64 = Index(right.to_numpy())
+
+ alt = left_int64.difference(right_int64, sort=None)
tm.assert_index_equal(result, alt, exact="equiv")
result = left.difference(right, sort=False)
assert_range_or_not_is_rangelike(result)
- alt = Int64Index(left).difference(Int64Index(right), sort=False)
+ alt = left_int64.difference(right_int64, sort=False)
tm.assert_index_equal(result, alt, exact="equiv")
| Extracted from #50479 to make it more manageable.
Progress towards #42717. | https://api.github.com/repos/pandas-dev/pandas/pulls/50826 | 2023-01-18T15:19:36Z | 2023-01-19T21:12:32Z | 2023-01-19T21:12:32Z | 2023-01-19T21:26:11Z |
remove Int/Uint/Float64Index from pandas/tests/frame | diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py
index 2d3d576154740..e9f6371239b4a 100644
--- a/pandas/tests/frame/methods/test_astype.py
+++ b/pandas/tests/frame/methods/test_astype.py
@@ -11,6 +11,7 @@
CategoricalDtype,
DataFrame,
DatetimeTZDtype,
+ Index,
Interval,
IntervalDtype,
NaT,
@@ -22,7 +23,6 @@
option_context,
)
import pandas._testing as tm
-from pandas.core.api import UInt64Index
def _check_cast(df, v):
@@ -372,7 +372,7 @@ def test_astype_extension_dtypes_duplicate_col(self, dtype):
)
def test_astype_column_metadata(self, dtype):
# GH#19920
- columns = UInt64Index([100, 200, 300], name="foo")
+ columns = Index([100, 200, 300], dtype=np.uint64, name="foo")
df = DataFrame(np.arange(15).reshape(5, 3), columns=columns)
df = df.astype(dtype)
tm.assert_index_equal(df.columns, columns)
@@ -441,7 +441,7 @@ def test_astype_to_datetime_unit(self, unit):
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
ser = df.iloc[:, 0]
- idx = pd.Index(ser)
+ idx = Index(ser)
dta = ser._values
if unit in ["ns", "us", "ms", "s"]:
@@ -476,7 +476,7 @@ def test_astype_to_datetime_unit(self, unit):
exp_dta = exp_ser._values
res_index = idx.astype(dtype)
- exp_index = pd.Index(exp_ser)
+ exp_index = Index(exp_ser)
assert exp_index.dtype == dtype
tm.assert_index_equal(res_index, exp_index)
@@ -504,7 +504,7 @@ def test_astype_to_timedelta_unit(self, unit):
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
ser = df.iloc[:, 0]
- tdi = pd.Index(ser)
+ tdi = Index(ser)
tda = tdi._values
if unit in ["us", "ms", "s"]:
diff --git a/pandas/tests/frame/methods/test_truncate.py b/pandas/tests/frame/methods/test_truncate.py
index 21f0664707ebe..149fcfb35f44d 100644
--- a/pandas/tests/frame/methods/test_truncate.py
+++ b/pandas/tests/frame/methods/test_truncate.py
@@ -5,11 +5,11 @@
from pandas import (
DataFrame,
DatetimeIndex,
+ Index,
Series,
date_range,
)
import pandas._testing as tm
-from pandas.core.api import Int64Index
class TestDataFrameTruncate:
@@ -108,13 +108,13 @@ def test_truncate_nonsortedindex_axis1(self):
"before, after, indices",
[(1, 2, [2, 1]), (None, 2, [2, 1, 0]), (1, None, [3, 2, 1])],
)
- @pytest.mark.parametrize("klass", [Int64Index, DatetimeIndex])
+ @pytest.mark.parametrize("dtyp", [*tm.ALL_REAL_NUMPY_DTYPES, "datetime64[ns]"])
def test_truncate_decreasing_index(
- self, before, after, indices, klass, frame_or_series
+ self, before, after, indices, dtyp, frame_or_series
):
# https://github.com/pandas-dev/pandas/issues/33756
- idx = klass([3, 2, 1, 0])
- if klass is DatetimeIndex:
+ idx = Index([3, 2, 1, 0], dtype=dtyp)
+ if isinstance(idx, DatetimeIndex):
before = pd.Timestamp(before) if before is not None else None
after = pd.Timestamp(after) if after is not None else None
indices = [pd.Timestamp(i) for i in indices]
| Extracted from #50479 to make it more manageable.
Progress towards #42717. | https://api.github.com/repos/pandas-dev/pandas/pulls/50825 | 2023-01-18T15:07:23Z | 2023-01-18T18:35:40Z | 2023-01-18T18:35:40Z | 2023-01-18T21:41:24Z |
Remove int64/uint64/float64 indexes from tests/resample & tests/reshape | diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 0c8e303b4ac56..3ab57e137f1c1 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -15,7 +15,6 @@
Timestamp,
)
import pandas._testing as tm
-from pandas.core.api import Int64Index
from pandas.core.indexes.datetimes import date_range
test_frame = DataFrame(
@@ -333,7 +332,7 @@ def test_consistency_with_window():
# consistent return values with window
df = test_frame
- expected = Int64Index([1, 2, 3], name="A")
+ expected = Index([1, 2, 3], name="A")
result = df.groupby("A").resample("2s").mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 35d10eafb5ba7..e52687e3cfbc2 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -20,6 +20,7 @@
CategoricalIndex,
DataFrame,
DatetimeIndex,
+ Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
@@ -29,11 +30,6 @@
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
-from pandas.core.api import (
- Float64Index,
- Int64Index,
- UInt64Index,
-)
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import (
MergeError,
@@ -1324,8 +1320,13 @@ def test_merge_two_empty_df_no_division_error(self):
["2001-01-01", "2002-02-02", "2003-03-03", pd.NaT, pd.NaT, pd.NaT]
),
),
- (Float64Index([1, 2, 3]), Float64Index([1, 2, 3, None, None, None])),
- (Int64Index([1, 2, 3]), Float64Index([1, 2, 3, None, None, None])),
+ *[
+ (
+ Index([1, 2, 3], dtype=dtyp),
+ Index([1, 2, 3, None, None, None], dtype=np.float64),
+ )
+ for dtyp in tm.ALL_REAL_NUMPY_DTYPES
+ ],
(
IntervalIndex.from_tuples([(1, 2), (2, 3), (3, 4)]),
IntervalIndex.from_tuples(
@@ -2142,15 +2143,13 @@ def test_merge_on_indexes(self, left_df, right_df, how, sort, expected):
@pytest.mark.parametrize(
"index",
- [
+ [Index([1, 2], dtype=dtyp, name="index_col") for dtyp in tm.ALL_REAL_NUMPY_DTYPES]
+ + [
CategoricalIndex(["A", "B"], categories=["A", "B"], name="index_col"),
- Float64Index([1.0, 2.0], name="index_col"),
- Int64Index([1, 2], name="index_col"),
- UInt64Index([1, 2], name="index_col"),
RangeIndex(start=0, stop=2, name="index_col"),
DatetimeIndex(["2018-01-01", "2018-01-02"], name="index_col"),
],
- ids=lambda x: type(x).__name__,
+ ids=lambda x: f"{type(x).__name__}[{x.dtype}]",
)
def test_merge_index_types(index):
# gh-20777
@@ -2652,11 +2651,11 @@ def test_merge_duplicate_columns_with_suffix_causing_another_duplicate_raises():
def test_merge_string_float_column_result():
# GH 13353
- df1 = DataFrame([[1, 2], [3, 4]], columns=pd.Index(["a", 114.0]))
+ df1 = DataFrame([[1, 2], [3, 4]], columns=Index(["a", 114.0]))
df2 = DataFrame([[9, 10], [11, 12]], columns=["x", "y"])
result = merge(df2, df1, how="inner", left_index=True, right_index=True)
expected = DataFrame(
- [[9, 10, 1, 2], [11, 12, 3, 4]], columns=pd.Index(["x", "y", "a", 114.0])
+ [[9, 10, 1, 2], [11, 12, 3, 4]], columns=Index(["x", "y", "a", 114.0])
)
tm.assert_frame_equal(result, expected)
@@ -2712,8 +2711,8 @@ def test_merge_outer_with_NaN(dtype):
def test_merge_different_index_names():
# GH#45094
- left = DataFrame({"a": [1]}, index=pd.Index([1], name="c"))
- right = DataFrame({"a": [1]}, index=pd.Index([1], name="d"))
+ left = DataFrame({"a": [1]}, index=Index([1], name="c"))
+ right = DataFrame({"a": [1]}, index=Index([1], name="d"))
result = merge(left, right, left_on="c", right_on="d")
expected = DataFrame({"a_x": [1], "a_y": 1})
tm.assert_frame_equal(result, expected)
| Extracted from #50479 to make it more manageable.
Progress towards #42717. | https://api.github.com/repos/pandas-dev/pandas/pulls/50823 | 2023-01-18T15:03:25Z | 2023-01-18T18:31:51Z | 2023-01-18T18:31:51Z | 2023-01-18T21:40:18Z |
DOC: Add 1.5.x note to main | diff --git a/doc/source/whatsnew/v1.5.3.rst b/doc/source/whatsnew/v1.5.3.rst
index 489a9b47c11e1..67c2347fe53ac 100644
--- a/doc/source/whatsnew/v1.5.3.rst
+++ b/doc/source/whatsnew/v1.5.3.rst
@@ -47,6 +47,7 @@ Other
as pandas works toward compatibility with SQLAlchemy 2.0.
- Reverted deprecation (:issue:`45324`) of behavior of :meth:`Series.__getitem__` and :meth:`Series.__setitem__` slicing with an integer :class:`Index`; this will remain positional (:issue:`49612`)
+- A ``FutureWarning`` raised when attempting to set values inplace with :meth:`DataFrame.loc` or :meth:`DataFrame.iloc` has been changed to a ``DeprecationWarning`` (:issue:`48673`)
.. ---------------------------------------------------------------------------
.. _whatsnew_153.contributors:
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50822 | 2023-01-18T14:40:08Z | 2023-01-18T16:06:01Z | 2023-01-18T16:06:01Z | 2023-01-18T18:10:05Z |
DOC: Fix whatsnew | diff --git a/doc/source/whatsnew/v1.5.3.rst b/doc/source/whatsnew/v1.5.3.rst
index 7488f5f755bf4..97c4c73f08c37 100644
--- a/doc/source/whatsnew/v1.5.3.rst
+++ b/doc/source/whatsnew/v1.5.3.rst
@@ -48,7 +48,7 @@ Other
as pandas works toward compatibility with SQLAlchemy 2.0.
- Reverted deprecation (:issue:`45324`) of behavior of :meth:`Series.__getitem__` and :meth:`Series.__setitem__` slicing with an integer :class:`Index`; this will remain positional (:issue:`49612`)
-- A ``FutureWarning`` raised when attempting to set values inplace with :meth:`DataFrame.loc` or :meth:`DataFrame.loc` has been changed to a ``DeprecationWarning`` (:issue:`48673`)
+- A ``FutureWarning`` raised when attempting to set values inplace with :meth:`DataFrame.loc` or :meth:`DataFrame.iloc` has been changed to a ``DeprecationWarning`` (:issue:`48673`)
.. ---------------------------------------------------------------------------
.. _whatsnew_153.contributors:
| cc @datapythonista | https://api.github.com/repos/pandas-dev/pandas/pulls/50821 | 2023-01-18T14:39:08Z | 2023-01-18T18:09:00Z | 2023-01-18T18:09:00Z | 2023-01-18T18:09:05Z |
update dev meeting frequency | diff --git a/doc/source/development/community.rst b/doc/source/development/community.rst
index c321c9b0cccf6..c536cafce3367 100644
--- a/doc/source/development/community.rst
+++ b/doc/source/development/community.rst
@@ -22,7 +22,7 @@ The pandas Community Meeting is a regular sync meeting for the project's
maintainers which is open to the community. Everyone is welcome to attend and
contribute to conversations.
-The meetings take place on the second Wednesday of each month at 18:00 UTC.
+The meetings take place on the second and fourth Wednesdays of each month at 18:00 UTC.
The minutes of past meetings are available in `this Google Document <https://docs.google.com/document/d/1tGbTiYORHiSPgVMXawiweGJlBw5dOkVJLY-licoBmBU/edit?usp=sharing>`__.
| Updated the dev meeting description to include the 2nd monthly meeting (4th Wednesday) | https://api.github.com/repos/pandas-dev/pandas/pulls/50820 | 2023-01-18T13:49:05Z | 2023-01-18T17:53:14Z | 2023-01-18T17:53:14Z | 2023-01-18T17:53:14Z |
DOC: Fix formatting issue with DataFrame.info docs | diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py
index 27b61d502e9de..9a93724ca8f37 100644
--- a/pandas/io/formats/info.py
+++ b/pandas/io/formats/info.py
@@ -254,7 +254,7 @@
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
- the output.\
+ the output.
{max_cols_sub}
memory_usage : bool, str, optional
Specifies whether total memory usage of the {klass}
| Removed a backslash that was causing the description of the **max_cols** argument from the [DataFrme.info](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.info.html#pandas.DataFrame.info) method to be merged with the description of the **buf** argument.

| https://api.github.com/repos/pandas-dev/pandas/pulls/50816 | 2023-01-18T12:37:28Z | 2023-01-18T19:24:17Z | 2023-01-18T19:24:17Z | 2023-01-18T19:33:17Z |
Manually Backport PR #50809 | diff --git a/doc/source/whatsnew/v1.5.3.rst b/doc/source/whatsnew/v1.5.3.rst
index 88ae0df704e69..7488f5f755bf4 100644
--- a/doc/source/whatsnew/v1.5.3.rst
+++ b/doc/source/whatsnew/v1.5.3.rst
@@ -1,7 +1,7 @@
.. _whatsnew_153:
-What's new in 1.5.3 (December ??, 2022)
----------------------------------------
+What's new in 1.5.3 (January 18, 2023)
+--------------------------------------
These are the changes in pandas 1.5.3. See :ref:`release` for a full changelog
including other versions of pandas.
@@ -20,7 +20,7 @@ Fixed regressions
- Fixed regression in :meth:`.SeriesGroupBy.apply` setting a ``name`` attribute on the result if the result was a :class:`DataFrame` (:issue:`49907`)
- Fixed performance regression in setting with the :meth:`~DataFrame.at` indexer (:issue:`49771`)
- Fixed regression in the methods ``apply``, ``agg``, and ``transform`` when used with NumPy functions that informed users to supply ``numeric_only=True`` if the operation failed on non-numeric dtypes; such columns must be dropped prior to using these methods (:issue:`50538`)
--
+- Fixed regression in :func:`to_datetime` raising ``ValueError`` when parsing array of ``float`` containing ``np.nan`` (:issue:`50237`)
.. ---------------------------------------------------------------------------
.. _whatsnew_153.bug_fixes:
@@ -49,7 +49,6 @@ Other
- Reverted deprecation (:issue:`45324`) of behavior of :meth:`Series.__getitem__` and :meth:`Series.__setitem__` slicing with an integer :class:`Index`; this will remain positional (:issue:`49612`)
- A ``FutureWarning`` raised when attempting to set values inplace with :meth:`DataFrame.loc` or :meth:`DataFrame.loc` has been changed to a ``DeprecationWarning`` (:issue:`48673`)
--
.. ---------------------------------------------------------------------------
.. _whatsnew_153.contributors:
| xref #50809
| https://api.github.com/repos/pandas-dev/pandas/pulls/50813 | 2023-01-18T10:15:26Z | 2023-01-18T14:45:59Z | 2023-01-18T14:45:59Z | 2023-01-18T14:45:59Z |
CI/WEB: Fix github quota errors by using website as cache | diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml
index 7a9f491228a83..908259597cafb 100644
--- a/.github/workflows/docbuild-and-upload.yml
+++ b/.github/workflows/docbuild-and-upload.yml
@@ -46,12 +46,6 @@ jobs:
- name: Build Pandas
uses: ./.github/actions/build_pandas
- - name: Set up maintainers cache
- uses: actions/cache@v3
- with:
- path: maintainers.json
- key: maintainers
-
- name: Build website
run: python web/pandas_web.py web/pandas --target-path=web/build
diff --git a/web/pandas/about/team.md b/web/pandas/about/team.md
index c3e5ef0a968eb..5229201ca7d36 100644
--- a/web/pandas/about/team.md
+++ b/web/pandas/about/team.md
@@ -9,7 +9,8 @@ If you want to support pandas development, you can find information in the [dona
## Active maintainers
<div class="card-group maintainers">
- {% for person in maintainers.active_with_github_info %}
+ {% for username in maintainers.active %}
+ {% set person = maintainers.github_info.get(username) %}
<div class="card">
<img class="card-img-top" alt="" src="{{ person.avatar_url }}"/>
<div class="card-body">
@@ -67,7 +68,8 @@ The project governance is available in the [project governance page](governance.
## Inactive maintainers
<ul>
- {% for person in maintainers.inactive_with_github_info %}
+ {% for username in maintainers.inactive %}
+ {% set person = maintainers.github_info.get(username) %}
<li>
<a href="{{ person.blog or person.html_url }}">
{{ person.name or person.login }}
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index 77dfac41ba4d7..816eb6ab296c1 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -1,10 +1,10 @@
main:
templates_path: _templates
base_template: "layout.html"
+ production_url: "https://pandas.pydata.org/"
ignore:
- _templates/layout.html
- config.yml
- - try.md # the binder page will be added later
github_repo_url: pandas-dev/pandas
context_preprocessors:
- pandas_web.Preprocessors.current_year
diff --git a/web/pandas_web.py b/web/pandas_web.py
index 4c30e1959fdff..e4568136edece 100755
--- a/web/pandas_web.py
+++ b/web/pandas_web.py
@@ -43,12 +43,6 @@
import requests
import yaml
-api_token = os.environ.get("GITHUB_TOKEN")
-if api_token is not None:
- GITHUB_API_HEADERS = {"Authorization": f"Bearer {api_token}"}
-else:
- GITHUB_API_HEADERS = {}
-
class Preprocessors:
"""
@@ -164,37 +158,39 @@ def maintainers_add_info(context):
Given the active maintainers defined in the yaml file, it fetches
the GitHub user information for them.
"""
- timestamp = time.time()
-
- cache_file = pathlib.Path("maintainers.json")
- if cache_file.is_file():
- with open(cache_file) as f:
- context["maintainers"] = json.load(f)
- # refresh cache after 1 hour
- if (timestamp - context["maintainers"]["timestamp"]) < 3_600:
- return context
-
- context["maintainers"]["timestamp"] = timestamp
-
repeated = set(context["maintainers"]["active"]) & set(
context["maintainers"]["inactive"]
)
if repeated:
raise ValueError(f"Maintainers {repeated} are both active and inactive")
- for kind in ("active", "inactive"):
- context["maintainers"][f"{kind}_with_github_info"] = []
- for user in context["maintainers"][kind]:
- resp = requests.get(
- f"https://api.github.com/users/{user}", headers=GITHUB_API_HEADERS
+ maintainers_info = {}
+ for user in (
+ context["maintainers"]["active"] + context["maintainers"]["inactive"]
+ ):
+ resp = requests.get(f"https://api.github.com/users/{user}")
+ if resp.status_code == 403:
+ sys.stderr.write(
+ "WARN: GitHub API quota exceeded when fetching maintainers\n"
+ )
+ # if we exceed github api quota, we use the github info
+ # of maintainers saved with the website
+ resp_bkp = requests.get(
+ context["main"]["production_url"] + "maintainers.json"
)
- if context["ignore_io_errors"] and resp.status_code == 403:
- return context
- resp.raise_for_status()
- context["maintainers"][f"{kind}_with_github_info"].append(resp.json())
+ resp_bkp.raise_for_status()
+ maintainers_info = resp_bkp.json()
+ break
- with open(cache_file, "w") as f:
- json.dump(context["maintainers"], f)
+ resp.raise_for_status()
+ maintainers_info[user] = resp.json()
+
+ context["maintainers"]["github_info"] = maintainers_info
+
+ # save the data fetched from github to use it in case we exceed
+ # git github api quota in the future
+ with open(pathlib.Path(context["target_path"]) / "maintainers.json", "w") as f:
+ json.dump(maintainers_info, f)
return context
@@ -203,15 +199,20 @@ def home_add_releases(context):
context["releases"] = []
github_repo_url = context["main"]["github_repo_url"]
- resp = requests.get(
- f"https://api.github.com/repos/{github_repo_url}/releases",
- headers=GITHUB_API_HEADERS,
- )
- if context["ignore_io_errors"] and resp.status_code == 403:
- return context
- resp.raise_for_status()
+ resp = requests.get(f"https://api.github.com/repos/{github_repo_url}/releases")
+ if resp.status_code == 403:
+ sys.stderr.write("WARN: GitHub API quota exceeded when fetching releases\n")
+ resp_bkp = requests.get(context["main"]["production_url"] + "releases.json")
+ resp_bkp.raise_for_status()
+ releases = resp_bkp.json()
+ else:
+ resp.raise_for_status()
+ releases = resp.json()
+
+ with open(pathlib.Path(context["target_path"]) / "releases.json", "w") as f:
+ json.dump(releases, f, default=datetime.datetime.isoformat)
- for release in resp.json():
+ for release in releases:
if release["prerelease"]:
continue
published = datetime.datetime.strptime(
@@ -229,6 +230,7 @@ def home_add_releases(context):
),
}
)
+
return context
@staticmethod
@@ -273,15 +275,22 @@ def roadmap_pdeps(context):
github_repo_url = context["main"]["github_repo_url"]
resp = requests.get(
"https://api.github.com/search/issues?"
- f"q=is:pr is:open label:PDEP repo:{github_repo_url}",
- headers=GITHUB_API_HEADERS,
+ f"q=is:pr is:open label:PDEP repo:{github_repo_url}"
)
- if context["ignore_io_errors"] and resp.status_code == 403:
- return context
- resp.raise_for_status()
+ if resp.status_code == 403:
+ sys.stderr.write("WARN: GitHub API quota exceeded when fetching pdeps\n")
+ resp_bkp = requests.get(context["main"]["production_url"] + "pdeps.json")
+ resp_bkp.raise_for_status()
+ pdeps = resp_bkp.json()
+ else:
+ resp.raise_for_status()
+ pdeps = resp.json()
- for pdep in resp.json()["items"]:
- context["pdeps"]["under_discussion"].append(
+ with open(pathlib.Path(context["target_path"]) / "pdeps.json", "w") as f:
+ json.dump(pdeps, f)
+
+ for pdep in pdeps["items"]:
+ context["pdeps"]["Under discussion"].append(
{"title": pdep["title"], "url": pdep["url"]}
)
@@ -314,7 +323,7 @@ def get_callable(obj_as_str: str) -> object:
return obj
-def get_context(config_fname: str, ignore_io_errors: bool, **kwargs):
+def get_context(config_fname: str, **kwargs):
"""
Load the config yaml as the base context, and enrich it with the
information added by the context preprocessors defined in the file.
@@ -323,7 +332,6 @@ def get_context(config_fname: str, ignore_io_errors: bool, **kwargs):
context = yaml.safe_load(f)
context["source_path"] = os.path.dirname(config_fname)
- context["ignore_io_errors"] = ignore_io_errors
context.update(kwargs)
preprocessors = (
@@ -361,7 +369,9 @@ def extend_base_template(content: str, base_template: str) -> str:
def main(
- source_path: str, target_path: str, base_url: str, ignore_io_errors: bool
+ source_path: str,
+ target_path: str,
+ base_url: str,
) -> int:
"""
Copy every file in the source directory to the target directory.
@@ -375,7 +385,7 @@ def main(
os.makedirs(target_path, exist_ok=True)
sys.stderr.write("Generating context...\n")
- context = get_context(config_fname, ignore_io_errors, base_url=base_url)
+ context = get_context(config_fname, base_url=base_url, target_path=target_path)
sys.stderr.write("Context generated\n")
templates_path = os.path.join(source_path, context["main"]["templates_path"])
@@ -419,15 +429,5 @@ def main(
parser.add_argument(
"--base-url", default="", help="base url where the website is served from"
)
- parser.add_argument(
- "--ignore-io-errors",
- action="store_true",
- help="do not fail if errors happen when fetching "
- "data from http sources, and those fail "
- "(mostly useful to allow GitHub quota errors "
- "when running the script locally)",
- )
args = parser.parse_args()
- sys.exit(
- main(args.source_path, args.target_path, args.base_url, args.ignore_io_errors)
- )
+ sys.exit(main(args.source_path, args.target_path, args.base_url))
| xref #50485
Looks like the previous approach didn't fix the github quota errors. @mroeschke I saw you also added a github token, but that didn't seem to work either.
What I'm doing here is to save the data we fetch from github in the website. For example https://pandas.pydata.org/maintainers.json (also for releases and pdeps). The, we request data from github normally, but if the quota is exceeded, if fallbacks to that json file with the data of the previous build. In the worst case scenario we'll have some delay updating the info in the website (we use the quota on PRs, and it can happen that for commits to main that update the cache files, we don't have quota for a while). But we shouldn't see any other CI failure caused by the github quota.
Since building the website shouldn't fail anymore, in the CI or locally, I remove the `--ignore-io-errors` parameter of the script, which doesn't make sense anymore. Also the previous cache, and the token, which I think they didn't help much.
I also found a typo when fetching the PDEPs under discussion, that was preventing to render then in the roadmap page. It's also being fixed here. | https://api.github.com/repos/pandas-dev/pandas/pulls/50811 | 2023-01-18T08:31:44Z | 2023-01-18T15:12:58Z | 2023-01-18T15:12:58Z | 2023-03-15T15:49:24Z |
TYP: fixed io.html._read typing | diff --git a/pandas/io/html.py b/pandas/io/html.py
index 7dcbd76b77b28..f025e12bd0f55 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -19,6 +19,7 @@
)
from pandas._typing import (
+ BaseBuffer,
FilePath,
ReadBuffer,
)
@@ -130,9 +131,7 @@ def _get_skiprows(skiprows: int | Sequence[int] | slice | None) -> int | Sequenc
raise TypeError(f"{type(skiprows).__name__} is not a valid type for skipping rows")
-def _read(
- obj: bytes | FilePath | ReadBuffer[str] | ReadBuffer[bytes], encoding: str | None
-) -> str | bytes:
+def _read(obj: FilePath | BaseBuffer, encoding: str | None) -> str | bytes:
"""
Try to read from a url, file or string.
@@ -150,13 +149,7 @@ def _read(
or hasattr(obj, "read")
or (isinstance(obj, str) and file_exists(obj))
):
- # error: Argument 1 to "get_handle" has incompatible type "Union[str, bytes,
- # Union[IO[Any], RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper, mmap]]";
- # expected "Union[PathLike[str], Union[str, Union[IO[Any], RawIOBase,
- # BufferedIOBase, TextIOBase, TextIOWrapper, mmap]]]"
- with get_handle(
- obj, "r", encoding=encoding # type: ignore[arg-type]
- ) as handles:
+ with get_handle(obj, "r", encoding=encoding) as handles:
text = handles.handle.read()
elif isinstance(obj, (str, bytes)):
text = obj
| xref https://github.com/pandas-dev/pandas/issues/37715 | https://api.github.com/repos/pandas-dev/pandas/pulls/50810 | 2023-01-18T06:50:58Z | 2023-01-18T19:22:31Z | 2023-01-18T19:22:31Z | 2023-01-18T20:05:44Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.