title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
CLN: remove ABCTimedelta
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 2c28a7bbb02d0..35c4b73b47695 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -22,9 +22,9 @@ cnp.import_array() from pandas._libs cimport util from pandas._libs.tslibs.nattype cimport c_NaT as NaT -from pandas._libs.tslibs.base cimport ABCTimedelta from pandas._libs.tslibs.period cimport is_period_object from pandas._libs.tslibs.timestamps cimport _Timestamp +from pandas._libs.tslibs.timedeltas cimport _Timedelta from pandas._libs.hashtable cimport HashTable @@ -471,7 +471,7 @@ cdef class TimedeltaEngine(DatetimeEngine): return 'm8[ns]' cdef int64_t _unbox_scalar(self, scalar) except? -1: - if not (isinstance(scalar, ABCTimedelta) or scalar is NaT): + if not (isinstance(scalar, _Timedelta) or scalar is NaT): raise TypeError(scalar) return scalar.value diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index c0244b6e18a12..b5f5ef0a3f593 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -42,9 +42,9 @@ from pandas._libs.tslibs.util cimport ( is_timedelta64_object, ) -from pandas._libs.tslibs.base cimport ABCTimedelta from pandas._libs.tslibs.timezones cimport tz_compare from pandas._libs.tslibs.timestamps cimport _Timestamp +from pandas._libs.tslibs.timedeltas cimport _Timedelta _VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither']) @@ -340,7 +340,7 @@ cdef class Interval(IntervalMixin): def _validate_endpoint(self, endpoint): # GH 23013 if not (is_integer_object(endpoint) or is_float_object(endpoint) or - isinstance(endpoint, (_Timestamp, ABCTimedelta))): + isinstance(endpoint, (_Timestamp, _Timedelta))): raise ValueError("Only numeric, Timestamp and Timedelta endpoints " "are allowed when constructing an Interval.") diff --git a/pandas/_libs/tslibs/base.pxd b/pandas/_libs/tslibs/base.pxd index d8c76542f3457..3bffff7aca43e 100644 --- a/pandas/_libs/tslibs/base.pxd +++ b/pandas/_libs/tslibs/base.pxd @@ -1,7 +1,4 @@ -from 
cpython.datetime cimport datetime, timedelta - -cdef class ABCTimedelta(timedelta): - pass +from cpython.datetime cimport datetime cdef class ABCTimestamp(datetime): diff --git a/pandas/_libs/tslibs/base.pyx b/pandas/_libs/tslibs/base.pyx index 6a5ee3f784334..1677a8b0be1ec 100644 --- a/pandas/_libs/tslibs/base.pyx +++ b/pandas/_libs/tslibs/base.pyx @@ -5,11 +5,7 @@ in order to allow for fast isinstance checks without circular dependency issues. This is analogous to core.dtypes.generic. """ -from cpython.datetime cimport datetime, timedelta - - -cdef class ABCTimedelta(timedelta): - pass +from cpython.datetime cimport datetime cdef class ABCTimestamp(datetime): diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd index 95ddf8840e65d..70a418d7803d1 100644 --- a/pandas/_libs/tslibs/timedeltas.pxd +++ b/pandas/_libs/tslibs/timedeltas.pxd @@ -1,6 +1,18 @@ +from cpython.datetime cimport timedelta from numpy cimport int64_t # Exposed for tslib, not intended for outside use. cpdef int64_t delta_to_nanoseconds(delta) except? 
-1 cdef convert_to_timedelta64(object ts, object unit) cdef bint is_any_td_scalar(object obj) + + +cdef class _Timedelta(timedelta): + cdef readonly: + int64_t value # nanoseconds + object freq # frequency reference + bint is_populated # are my components populated + int64_t _d, _h, _m, _s, _ms, _us, _ns + + cpdef timedelta to_pytimedelta(_Timedelta self) + cpdef bint _has_ns(self) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 10c1a56a2eb4e..f7bbf2c9e1c8b 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -21,7 +21,7 @@ from pandas._libs.tslibs.util cimport ( is_float_object, is_array ) -from pandas._libs.tslibs.base cimport ABCTimedelta, ABCTimestamp +from pandas._libs.tslibs.base cimport ABCTimestamp from pandas._libs.tslibs.conversion cimport cast_from_unit @@ -675,12 +675,12 @@ cdef _to_py_int_float(v): # timedeltas that we need to do object instantiation in python. This will # serve as a C extension type that shadows the Python class, where we do any # heavy lifting. -cdef class _Timedelta(ABCTimedelta): - cdef readonly: - int64_t value # nanoseconds - object freq # frequency reference - bint is_populated # are my components populated - int64_t _d, _h, _m, _s, _ms, _us, _ns +cdef class _Timedelta(timedelta): + # cdef readonly: + # int64_t value # nanoseconds + # object freq # frequency reference + # bint is_populated # are my components populated + # int64_t _d, _h, _m, _s, _ms, _us, _ns # higher than np.ndarray and np.matrix __array_priority__ = 100
Hoping we can eventually remove ABCTimestamp too, not sure.
https://api.github.com/repos/pandas-dev/pandas/pulls/34559
2020-06-03T20:41:27Z
2020-06-03T22:24:32Z
2020-06-03T22:24:32Z
2020-06-03T22:27:45Z
DOC: Add bcpandas to Ecosystem in docs
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 62065f016e438..72e24e34bc5c1 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -320,6 +320,20 @@ provide a pandas-like and pandas-compatible toolkit for analytics on multi- dimensional arrays, rather than the tabular data for which pandas excels. +.. _ecosystem.io: + +IO +-- + +`BCPandas <https://github.com/yehoshuadimarsky/bcpandas>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +BCPandas provides high performance writes from pandas to Microsoft SQL Server, +far exceeding the performance of the native ``df.to_sql`` method. Internally, it uses +Microsoft's BCP utility, but the complexity is fully abstracted away from the end user. +Rigorously tested, it is a complete replacement for ``df.to_sql``. + + .. _ecosystem.out-of-core: Out-of-core
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Hi, I wrote this utility which I use in production almost daily at my job, and it's been super helpful for me personally to fill a gap - slow writes from Pandas to MS SQL. Figured I'd suggest it in the pandas ecosystem if it could be useful to others. Wasn't sure where to put it, and didn't want to start a new section, so I put it in `Out-of-core`. It appeared to be sorted alphabetically, so I put it in that order.
https://api.github.com/repos/pandas-dev/pandas/pulls/34558
2020-06-03T18:46:57Z
2020-06-14T15:51:17Z
2020-06-14T15:51:17Z
2020-06-14T18:01:51Z
REF: simplify wrapping in apply_index
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 0deaf082dd1c7..77b60d0c22322 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -40,6 +40,7 @@ from pandas._libs.tslibs.ccalendar cimport DAY_NANOS, get_days_in_month, dayofwe from pandas._libs.tslibs.conversion cimport ( convert_datetime_to_tsobject, localize_pydatetime, + normalize_i8_timestamps, ) from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT from pandas._libs.tslibs.np_datetime cimport ( @@ -79,21 +80,14 @@ cdef bint _is_normalized(datetime dt): def apply_index_wraps(func): # Note: normally we would use `@functools.wraps(func)`, but this does # not play nicely with cython class methods - def wrapper(self, other): - - is_index = not util.is_array(other._data) - - # operate on DatetimeArray - arr = other._data if is_index else other - - result = func(self, arr) + def wrapper(self, other) -> np.ndarray: + # other is a DatetimeArray - if is_index: - # Wrap DatetimeArray result back to DatetimeIndex - result = type(other)._simple_new(result, name=other.name) + result = func(self, other) + result = np.asarray(result) if self.normalize: - result = result.to_period('D').to_timestamp() + result = normalize_i8_timestamps(result.view("i8"), None) return result # do @functools.wraps(func) manually since it doesn't work on cdef funcs @@ -1889,7 +1883,7 @@ cdef class YearOffset(SingleConstructorOffset): shifted = shift_quarters( dtindex.asi8, self.n, self.month, self._day_opt, modby=12 ) - return type(dtindex)._simple_new(shifted, dtype=dtindex.dtype) + return shifted cdef class BYearEnd(YearOffset): @@ -2033,7 +2027,7 @@ cdef class QuarterOffset(SingleConstructorOffset): shifted = shift_quarters( dtindex.asi8, self.n, self.startingMonth, self._day_opt ) - return type(dtindex)._simple_new(shifted, dtype=dtindex.dtype) + return shifted cdef class BQuarterEnd(QuarterOffset): @@ -2139,7 +2133,7 @@ cdef class 
MonthOffset(SingleConstructorOffset): @apply_index_wraps def apply_index(self, dtindex): shifted = shift_months(dtindex.asi8, self.n, self._day_opt) - return type(dtindex)._simple_new(shifted, dtype=dtindex.dtype) + return shifted cpdef __setstate__(self, state): state.pop("_use_relativedelta", False) @@ -2503,8 +2497,6 @@ cdef class Week(SingleConstructorOffset): @apply_index_wraps def apply_index(self, dtindex): if self.weekday is None: - # integer addition on PeriodIndex is deprecated, - # so we use _time_shift directly td = timedelta(days=7 * self.n) td64 = np.timedelta64(td, "ns") return dtindex + td64 diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 7bbe60c4fdcd1..053aeb6d81be4 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -685,7 +685,9 @@ def _add_offset(self, offset): values = self.tz_localize(None) else: values = self - result = offset.apply_index(values).tz_localize(self.tz) + result = offset.apply_index(values) + result = DatetimeArray._simple_new(result) + result = result.tz_localize(self.tz) except NotImplementedError: warnings.warn( diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 86cc7ff753660..e3a89d9ed57a6 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -3524,7 +3524,7 @@ def test_offset_whole_year(self): with tm.assert_produces_warning(None): # GH#22535 check that we don't get a FutureWarning from adding # an integer array to PeriodIndex - result = SemiMonthEnd().apply_index(s) + result = SemiMonthEnd() + s exp = DatetimeIndex(dates[1:]) tm.assert_index_equal(result, exp) @@ -3672,7 +3672,7 @@ def test_apply_index(self, case): with tm.assert_produces_warning(None): # GH#22535 check that we don't get a FutureWarning from adding # an integer array to PeriodIndex - result = offset.apply_index(s) + result = offset + s exp = DatetimeIndex(cases.values()) 
tm.assert_index_equal(result, exp) @@ -3783,7 +3783,7 @@ def test_offset_whole_year(self): with tm.assert_produces_warning(None): # GH#22535 check that we don't get a FutureWarning from adding # an integer array to PeriodIndex - result = SemiMonthBegin().apply_index(s) + result = SemiMonthBegin() + s exp = DatetimeIndex(dates[1:]) tm.assert_index_equal(result, exp) @@ -3936,7 +3936,7 @@ def test_apply_index(self, case): with tm.assert_produces_warning(None): # GH#22535 check that we don't get a FutureWarning from adding # an integer array to PeriodIndex - result = offset.apply_index(s) + result = offset + s exp = DatetimeIndex(cases.values()) tm.assert_index_equal(result, exp) diff --git a/pandas/tests/tseries/offsets/test_yqm_offsets.py b/pandas/tests/tseries/offsets/test_yqm_offsets.py index 13cab9be46d37..9921355bdf2ee 100644 --- a/pandas/tests/tseries/offsets/test_yqm_offsets.py +++ b/pandas/tests/tseries/offsets/test_yqm_offsets.py @@ -65,8 +65,6 @@ def test_apply_index(cls, n): res = rng + offset assert res.freq is None # not retained - res_v2 = offset.apply_index(rng) - assert (res == res_v2).all() assert res[0] == rng[0] + offset assert res[-1] == rng[-1] + offset res2 = ser + offset
Make the caller (DatetimeArray._add_offset) responsible for wrapping, so we can move the liboffsets methods towards only needing the ndarrays.
https://api.github.com/repos/pandas-dev/pandas/pulls/34555
2020-06-03T17:49:42Z
2020-06-03T22:24:51Z
2020-06-03T22:24:51Z
2020-06-03T22:28:14Z
CLN: Update imports
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index f11f3ad974b37..7bbe60c4fdcd1 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -406,7 +406,7 @@ def _generate_range( index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz)) if tz is not None and index.tz is None: - arr = conversion.tz_localize_to_utc( + arr = tzconversion.tz_localize_to_utc( index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent ) @@ -967,7 +967,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"): tz = timezones.maybe_get_tz(tz) # Convert to UTC - new_dates = conversion.tz_localize_to_utc( + new_dates = tzconversion.tz_localize_to_utc( self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent ) new_dates = new_dates.view(DT64NS_DTYPE) @@ -1881,7 +1881,7 @@ def sequence_to_dt64ns( dayfirst : bool, default False yearfirst : bool, default False ambiguous : str, bool, or arraylike, default 'raise' - See pandas._libs.tslibs.conversion.tz_localize_to_utc. + See pandas._libs.tslibs.tzconversion.tz_localize_to_utc. Returns ------- @@ -1961,7 +1961,7 @@ def sequence_to_dt64ns( if tz is not None: # Convert tz-naive to UTC tz = timezones.maybe_get_tz(tz) - data = conversion.tz_localize_to_utc( + data = tzconversion.tz_localize_to_utc( data.view("i8"), tz, ambiguous=ambiguous ) data = data.view(DT64NS_DTYPE)
https://api.github.com/repos/pandas-dev/pandas/pulls/34554
2020-06-03T17:30:20Z
2020-06-03T18:06:30Z
2020-06-03T18:06:30Z
2020-06-03T18:11:35Z
DOC: fix PR06 (parameter type) errors in Timestamp docstrings
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index fad87f9f910cb..2c30fd3cca4de 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1053,7 +1053,7 @@ timedelta}, default 'raise' Parameters ---------- - locale : string, default None (English locale) + locale :str, default None (English locale) Locale determining the language in which to return the day name. Returns @@ -1070,7 +1070,7 @@ timedelta}, default 'raise' Parameters ---------- - locale : string, default None (English locale) + locale : str, default None (English locale) Locale determining the language in which to return the month name. Returns
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34553
2020-06-03T16:56:44Z
2020-06-03T20:37:49Z
null
2020-06-03T20:37:50Z
DOC: fix PR06 errors in Timedeltas docstrings (parameter type)
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 10c1a56a2eb4e..76fa35f672c5e 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1093,7 +1093,7 @@ class Timedelta(_Timedelta): Parameters ---------- - value : Timedelta, timedelta, np.timedelta64, string, or integer + value : Timedelta, timedelta, np.timedelta64, str, or int unit : str, default 'ns' Denote the unit of the input, if input is an integer.
- [x] closes #28253 - [ ] tests added / passed - [ ] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34552
2020-06-03T15:39:46Z
2020-06-03T20:37:49Z
null
2020-06-03T20:37:50Z
Ci #34131 rm slow fixtures
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 08d8d5ca342b7..62e73f20efca5 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -205,7 +205,6 @@ def test_simple_cmp_ops(self): for lhs, rhs, cmp_op in product(bool_lhses, bool_rhses, self.cmp_ops): self.check_simple_cmp_op(lhs, cmp_op, rhs) - @pytest.mark.slow def test_binary_arith_ops(self): for lhs, op, rhs in product(self.lhses, self.arith_ops, self.rhses): self.check_binary_arith_op(lhs, op, rhs) @@ -224,17 +223,14 @@ def test_pow(self): for lhs, rhs in product(self.lhses, self.rhses): self.check_pow(lhs, "**", rhs) - @pytest.mark.slow def test_single_invert_op(self): for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses): self.check_single_invert_op(lhs, op, rhs) - @pytest.mark.slow def test_compound_invert_op(self): for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses): self.check_compound_invert_op(lhs, op, rhs) - @pytest.mark.slow def test_chained_cmp_op(self): mids = self.lhses cmp_ops = "<", ">" diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 6d786d9580542..1ddea11d0b102 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -34,7 +34,6 @@ def test_repr_mixed(self, float_string_frame): repr(float_string_frame) float_string_frame.info(verbose=False, buf=buf) - @pytest.mark.slow def test_repr_mixed_big(self): # big mixed biggie = DataFrame( @@ -81,7 +80,6 @@ def test_repr_dimensions(self): with option_context("display.show_dimensions", "truncate"): assert "2 rows x 2 columns" not in repr(df) - @pytest.mark.slow def test_repr_big(self): # big one biggie = DataFrame(np.zeros((200, 4)), columns=range(4), index=range(200)) diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index 0a096acc9fa6d..c3de9dfd20224 100644 --- 
a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -78,13 +78,11 @@ def test_boxplot_legacy2(self): lines = list(itertools.chain.from_iterable(d.values())) assert len(ax.get_lines()) == len(lines) - @pytest.mark.slow def test_boxplot_return_type_none(self): # GH 12216; return_type=None & by=None -> axes result = self.hist_df.boxplot() assert isinstance(result, self.plt.Axes) - @pytest.mark.slow def test_boxplot_return_type_legacy(self): # API change in https://github.com/pandas-dev/pandas/pull/7096 import matplotlib as mpl # noqa @@ -112,7 +110,6 @@ def test_boxplot_return_type_legacy(self): result = df.boxplot(return_type="both") self._check_box_return_type(result, "both") - @pytest.mark.slow def test_boxplot_axis_limits(self): def _check_ax_limits(col, ax): y_min, y_max = ax.get_ylim() @@ -139,13 +136,11 @@ def _check_ax_limits(col, ax): assert age_ax._sharey == height_ax assert dummy_ax._sharey is None - @pytest.mark.slow def test_boxplot_empty_column(self): df = DataFrame(np.random.randn(20, 4)) df.loc[:, 0] = np.nan _check_plot_works(df.boxplot, return_type="axes") - @pytest.mark.slow def test_figsize(self): df = DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"]) result = df.boxplot(return_type="axes", figsize=(12, 8)) @@ -253,7 +248,6 @@ def test_boxplot_legacy3(self): axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes") self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) - @pytest.mark.slow def test_grouped_plot_fignums(self): n = 10 weight = Series(np.random.normal(166, 20, size=n)) diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 738df5244955a..bb4237fb4ea08 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -250,7 +250,6 @@ def test_plot_offset_freq(self): ser = Series(np.random.randn(len(dr)), index=dr) _check_plot_works(ser.plot) - 
@pytest.mark.slow def test_plot_multiple_inferred_freq(self): dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(2000, 1, 11)]) ser = Series(np.random.randn(len(dr)), index=dr) @@ -353,7 +352,6 @@ def test_dataframe(self): idx = ax.get_lines()[0].get_xdata() tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx)) - @pytest.mark.slow def test_axis_limits(self): def _test(ax): xlim = ax.get_xlim() @@ -450,7 +448,6 @@ def test_finder_quarterly(self): assert rs1 == xpl1 assert rs2 == xpl2 - @pytest.mark.slow def test_finder_monthly(self): yrs = [1.15, 2.5, 4, 11] @@ -483,7 +480,6 @@ def test_finder_monthly_long(self): xp = Period("1989Q1", "M").ordinal assert rs == xp - @pytest.mark.slow def test_finder_annual(self): xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170] xp = [Period(x, freq="A").ordinal for x in xp] @@ -499,7 +495,6 @@ def test_finder_annual(self): assert rs == xp - @pytest.mark.slow def test_finder_minutely(self): nminutes = 50 * 24 * 60 rng = date_range("1/1/1999", freq="Min", periods=nminutes) @@ -579,7 +574,6 @@ def test_gaps(self): mask = data.mask assert mask[2:5, 1].all() - @pytest.mark.slow def test_gap_upsample(self): low = tm.makeTimeSeries() low[5:25] = np.nan @@ -786,7 +780,6 @@ def test_mixed_freq_alignment(self): assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0] - @pytest.mark.slow def test_mixed_freq_lf_first(self): idxh = date_range("1/1/1999", periods=365, freq="D") @@ -1131,7 +1124,6 @@ def test_time_musec(self): xp = time(h, m, s, us).strftime("%H:%M") assert xp == rs - @pytest.mark.slow def test_secondary_upsample(self): idxh = date_range("1/1/1999", periods=365, freq="D") idxl = date_range("1/1/1999", periods=12, freq="M") diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index c84a09f21f46b..ad76509b51169 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -248,7 +248,6 @@ def test_plot_xy(self): # 
columns.inferred_type == 'mixed' # TODO add MultiIndex test - @pytest.mark.slow @pytest.mark.parametrize( "input_log, expected_log", [(True, "log"), ("sym", "symlog")] ) @@ -277,7 +276,6 @@ def test_invalid_logscale(self, input_param): with pytest.raises(ValueError, match=msg): df.plot(**{input_param: "sm"}) - @pytest.mark.slow def test_xcompat(self): import pandas as pd @@ -966,7 +964,6 @@ def test_bar_user_colors(self): ] assert result == expected - @pytest.mark.slow def test_bar_linewidth(self): df = DataFrame(randn(5, 5)) @@ -1110,7 +1107,6 @@ def test_bar_nan(self): expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0] assert result == expected - @pytest.mark.slow def test_bar_categorical(self): # GH 13019 df1 = pd.DataFrame( @@ -1254,7 +1250,6 @@ def test_plot_scatter_with_categorical_data(self, x, y): _check_plot_works(df.plot.scatter, x=x, y=y) - @pytest.mark.slow def test_plot_scatter_with_c(self): df = DataFrame( randn(6, 4), @@ -1460,7 +1455,6 @@ def test_bar_stacked_center(self): self._check_bar_alignment(df, kind="barh", stacked=True) self._check_bar_alignment(df, kind="barh", stacked=True, width=0.9) - @pytest.mark.slow def test_bar_center(self): df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5)) self._check_bar_alignment(df, kind="bar", stacked=False) @@ -1468,7 +1462,6 @@ def test_bar_center(self): self._check_bar_alignment(df, kind="barh", stacked=False) self._check_bar_alignment(df, kind="barh", stacked=False, width=0.9) - @pytest.mark.slow def test_bar_subplots_center(self): df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5)) self._check_bar_alignment(df, kind="bar", subplots=True) @@ -1592,7 +1585,6 @@ def test_boxplot_vertical(self): tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions) assert len(ax.lines) == self.bp_n_objects * len(numeric_cols) - @pytest.mark.slow def test_boxplot_return_type(self): df = DataFrame( randn(6, 4), @@ -1653,7 +1645,6 @@ def test_kde_df(self): axes = 
df.plot(kind="kde", logy=True, subplots=True) self._check_ax_scales(axes, yaxis="log") - @pytest.mark.slow @td.skip_if_no_scipy def test_kde_missing_vals(self): df = DataFrame(np.random.uniform(size=(100, 4))) @@ -1870,7 +1861,6 @@ def test_hist_df_coord(self): expected_w=np.array([6, 7, 8, 9, 10]), ) - @pytest.mark.slow def test_plot_int_columns(self): df = DataFrame(randn(100, 4)).cumsum() _check_plot_works(df.plot, legend=True) @@ -1991,7 +1981,6 @@ def test_legend_name(self): leg_title = ax.legend_.get_title() self._check_text_labels(leg_title, "new") - @pytest.mark.slow def test_no_legend(self): kinds = ["line", "bar", "barh", "kde", "area", "hist"] df = DataFrame(rand(3, 3), columns=["a", "b", "c"]) @@ -2001,7 +1990,6 @@ def test_no_legend(self): ax = df.plot(kind=kind, legend=False) self._check_legend_labels(ax, visible=False) - @pytest.mark.slow def test_style_by_column(self): import matplotlib.pyplot as plt @@ -2029,7 +2017,6 @@ def test_line_label_none(self): ax = s.plot(legend=True) assert ax.get_legend().get_texts()[0].get_text() == "None" - @pytest.mark.slow def test_line_colors(self): from matplotlib import cm @@ -2968,7 +2955,6 @@ def test_memory_leak(self): # need to actually access something to get an error results[key].lines - @pytest.mark.slow def test_df_subplots_patterns_minorticks(self): # GH 10657 import matplotlib.pyplot as plt diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 5a30e9fbb91c6..69c03b000f2c2 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -46,13 +46,11 @@ def test_hist_legacy(self): with pytest.raises(ValueError): self.ts.hist(by=self.ts.index, figure=fig) - @pytest.mark.slow def test_hist_bins_legacy(self): df = DataFrame(np.random.randn(10, 2)) ax = df.hist(bins=2)[0][0] assert len(ax.patches) == 2 - @pytest.mark.slow def test_hist_layout(self): df = self.hist_df with pytest.raises(ValueError): @@ -99,7 +97,6 @@ 
def test_hist_layout_with_by(self): axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7)) self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7)) - @pytest.mark.slow def test_hist_no_overlap(self): from matplotlib.pyplot import subplot, gcf @@ -113,13 +110,11 @@ def test_hist_no_overlap(self): axes = fig.axes assert len(axes) == 2 - @pytest.mark.slow def test_hist_by_no_extra_plots(self): df = self.hist_df axes = df.height.hist(by=df.gender) # noqa assert len(self.plt.get_fignums()) == 1 - @pytest.mark.slow def test_plot_fails_when_ax_differs_from_figure(self): from pylab import figure @@ -201,7 +196,6 @@ def test_hist_df_legacy(self): with pytest.raises(AttributeError): ser.hist(foo="bar") - @pytest.mark.slow def test_hist_non_numerical_raises(self): # gh-10444 df = DataFrame(np.random.rand(10, 2)) @@ -357,7 +351,6 @@ def test_grouped_hist_legacy(self): with pytest.raises(ValueError, match=msg): df.hist(by="C", figsize="default") - @pytest.mark.slow def test_grouped_hist_legacy2(self): n = 10 weight = Series(np.random.normal(166, 20, size=n)) @@ -426,7 +419,6 @@ def test_grouped_hist_layout(self): axes = df.hist(column=["height", "weight", "category"]) self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) - @pytest.mark.slow def test_grouped_hist_multiple_axes(self): # GH 6970, GH 7069 df = self.hist_df @@ -446,7 +438,6 @@ def test_grouped_hist_multiple_axes(self): # pass different number of axes from required axes = df.hist(column="height", ax=axes) - @pytest.mark.slow def test_axis_share_x(self): df = self.hist_df # GH4089 @@ -460,7 +451,6 @@ def test_axis_share_x(self): assert not ax1._shared_y_axes.joined(ax1, ax2) assert not ax2._shared_y_axes.joined(ax1, ax2) - @pytest.mark.slow def test_axis_share_y(self): df = self.hist_df ax1, ax2 = df.hist(column="height", by=df.gender, sharey=True) @@ -473,7 +463,6 @@ def test_axis_share_y(self): assert not ax1._shared_x_axes.joined(ax1, ax2) assert not ax2._shared_x_axes.joined(ax1, 
ax2) - @pytest.mark.slow def test_axis_share_xy(self): df = self.hist_df ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True, sharey=True) diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 27039948dfc16..c3c8c4588a862 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -68,7 +68,6 @@ def setup_method(self, method): self.ts = tm.makeTimeSeries() self.ts.name = "ts" - @pytest.mark.slow def test_autocorrelation_plot(self): from pandas.plotting import autocorrelation_plot @@ -78,14 +77,12 @@ def test_autocorrelation_plot(self): ax = autocorrelation_plot(self.ts, label="Test") self._check_legend_labels(ax, labels=["Test"]) - @pytest.mark.slow def test_lag_plot(self): from pandas.plotting import lag_plot _check_plot_works(lag_plot, series=self.ts) _check_plot_works(lag_plot, series=self.ts, lag=5) - @pytest.mark.slow def test_bootstrap_plot(self): from pandas.plotting import bootstrap_plot @@ -200,7 +197,6 @@ def test_andrews_curves(self, iris): handles, labels = ax.get_legend_handles_labels() self._check_colors(handles, linecolors=colors) - @pytest.mark.slow def test_parallel_coordinates(self, iris): from pandas.plotting import parallel_coordinates from matplotlib import cm @@ -405,7 +401,6 @@ def test_get_standard_colors_no_appending(self): p = df.A.plot.bar(figsize=(16, 7), color=color_list) assert p.patches[1].get_facecolor() == p.patches[17].get_facecolor() - @pytest.mark.slow def test_dictionary_color(self): # issue-8193 # Test plot color dictionary format diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 5341878d4986e..420d4355f46d6 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -35,7 +35,6 @@ def setup_method(self, method): self.iseries = tm.makePeriodSeries() self.iseries.name = "iseries" - @pytest.mark.slow def test_plot(self): _check_plot_works(self.ts.plot, label="foo") 
_check_plot_works(self.ts.plot, use_index=False) @@ -71,7 +70,6 @@ def test_plot(self): ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1)) self._check_axes_shape(ax, axes_num=1, layout=(1, 1)) - @pytest.mark.slow def test_plot_figsize_and_title(self): # figsize and title _, ax = self.plt.subplots() @@ -209,7 +207,6 @@ def test_line_use_index_false(self): label2 = ax2.get_xlabel() assert label2 == "" - @pytest.mark.slow def test_bar_log(self): expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]) @@ -243,7 +240,6 @@ def test_bar_log(self): tm.assert_almost_equal(res[1], ymax) tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected) - @pytest.mark.slow def test_bar_ignore_index(self): df = Series([1, 2, 3, 4], index=["a", "b", "c", "d"]) _, ax = self.plt.subplots() @@ -352,14 +348,12 @@ def test_pie_nan(self): result = [x.get_text() for x in ax.texts] assert result == expected - @pytest.mark.slow def test_hist_df_kwargs(self): df = DataFrame(np.random.randn(10, 2)) _, ax = self.plt.subplots() ax = df.plot.hist(bins=5, ax=ax) assert len(ax.patches) == 10 - @pytest.mark.slow def test_hist_df_with_nonnumerics(self): # GH 9853 with tm.RNGContext(1): @@ -397,13 +391,11 @@ def test_hist_legacy(self): with pytest.raises(ValueError): self.ts.hist(by=self.ts.index, figure=fig) - @pytest.mark.slow def test_hist_bins_legacy(self): df = DataFrame(np.random.randn(10, 2)) ax = df.hist(bins=2)[0][0] assert len(ax.patches) == 2 - @pytest.mark.slow def test_hist_layout(self): df = self.hist_df with pytest.raises(ValueError): @@ -448,7 +440,6 @@ def test_hist_layout_with_by(self): axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7)) self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7)) - @pytest.mark.slow def test_hist_no_overlap(self): from matplotlib.pyplot import subplot, gcf @@ -462,7 +453,6 @@ def test_hist_no_overlap(self): axes = fig.axes assert len(axes) == 2 - @pytest.mark.slow def test_hist_secondary_legend(self): # 
GH 9610 df = DataFrame(np.random.randn(30, 4), columns=list("abcd")) @@ -501,7 +491,6 @@ def test_hist_secondary_legend(self): assert ax.get_yaxis().get_visible() tm.close() - @pytest.mark.slow def test_df_series_secondary_legend(self): # GH 9779 df = DataFrame(np.random.randn(30, 3), columns=list("abc")) @@ -565,7 +554,6 @@ def test_df_series_secondary_legend(self): assert ax.get_yaxis().get_visible() tm.close() - @pytest.mark.slow @pytest.mark.parametrize( "input_logy, expected_scale", [(True, "log"), ("sym", "symlog")] ) @@ -581,7 +569,6 @@ def test_secondary_logy(self, input_logy, expected_scale): assert ax1.get_yscale() == expected_scale assert ax2.get_yscale() == expected_scale - @pytest.mark.slow def test_plot_fails_with_dupe_color_and_style(self): x = Series(randn(2)) with pytest.raises(ValueError): @@ -625,7 +612,6 @@ def test_kde_kwargs(self): self._check_ax_scales(ax, yaxis="log") self._check_text_labels(ax.yaxis.get_label(), "Density") - @pytest.mark.slow @td.skip_if_no_scipy def test_kde_missing_vals(self): s = Series(np.random.uniform(size=50)) @@ -635,7 +621,6 @@ def test_kde_missing_vals(self): # gh-14821: check if the values have any missing values assert any(~np.isnan(axes.lines[0].get_xdata())) - @pytest.mark.slow def test_hist_kwargs(self): _, ax = self.plt.subplots() ax = self.ts.plot.hist(bins=5, ax=ax) @@ -652,7 +637,6 @@ def test_hist_kwargs(self): ax = self.ts.plot.hist(align="left", stacked=True, ax=ax) tm.close() - @pytest.mark.slow @td.skip_if_no_scipy def test_hist_kde_color(self): _, ax = self.plt.subplots() @@ -668,7 +652,6 @@ def test_hist_kde_color(self): assert len(lines) == 1 self._check_colors(lines, ["r"]) - @pytest.mark.slow def test_boxplot_series(self): _, ax = self.plt.subplots() ax = self.ts.plot.box(logy=True, ax=ax) @@ -678,7 +661,6 @@ def test_boxplot_series(self): ylabels = ax.get_yticklabels() self._check_text_labels(ylabels, [""] * len(ylabels)) - @pytest.mark.slow def test_kind_both_ways(self): s = Series(range(3)) 
kinds = ( @@ -690,7 +672,6 @@ def test_kind_both_ways(self): s.plot(kind=kind, ax=ax) getattr(s.plot, kind)() - @pytest.mark.slow def test_invalid_plot_data(self): s = Series(list("abcd")) _, ax = self.plt.subplots() @@ -720,7 +701,6 @@ def test_invalid_kind(self): with pytest.raises(ValueError): s.plot(kind="aasdf") - @pytest.mark.slow def test_dup_datetime_index_plot(self): dr1 = date_range("1/1/2009", periods=4) dr2 = date_range("1/2/2009", periods=4) @@ -783,7 +763,6 @@ def test_series_grid_settings(self): plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds, ) - @pytest.mark.slow def test_standard_colors(self): from pandas.plotting._matplotlib.style import _get_standard_colors @@ -800,7 +779,6 @@ def test_standard_colors(self): result = _get_standard_colors(3, color=[c]) assert result == [c] * 3 - @pytest.mark.slow def test_standard_colors_all(self): import matplotlib.colors as colors from pandas.plotting._matplotlib.style import _get_standard_colors diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index c07a5673fe503..9df10e2a29dd5 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1985,7 +1985,6 @@ def test_pivot_string_func_vs_func(self, f, f_numpy): expected = pivot_table(self.data, index="A", columns="B", aggfunc=f_numpy) tm.assert_frame_equal(result, expected) - @pytest.mark.slow def test_pivot_number_of_levels_larger_than_int32(self): # GH 20601 df = DataFrame( diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index 0b34fab7b80b1..ab07c5a9fbe48 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -82,7 +82,6 @@ def test_series_set_value(): tm.assert_series_equal(s, expected) -@pytest.mark.slow def test_slice_locs_indexerror(): times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10) for i in range(100000)] s = Series(range(100000), times) 
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 1ba73292dc0b4..67791058ea750 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -830,7 +830,6 @@ def test_unstack_unobserved_keys(self): recons = result.stack() tm.assert_frame_equal(recons, df) - @pytest.mark.slow def test_unstack_number_of_levels_larger_than_int32(self): # GH 20601 df = DataFrame( diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index 98297474243e4..ea586a3771877 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -19,7 +19,6 @@ class TestSorting: - @pytest.mark.slow def test_int64_overflow(self): B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
- [ x ] Part of #34131 - [ x ] passes `black pandas` - [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ FAIL ] tests passed for 11 of 12 files changed. 6 tests fail inside the file `pandas/core/arrays/datetimes.py` , but there are not concerned by this PR. So I decide to push anyway. I went through tests tagged with `@pytest.mark.slow` and take a look at the duration. The test is unmarked if the test duration is below 1 second. note: I considered the total time for each combination in case of parameters A total of 76 flags `slow` were unmark, so we can focus on "real" slow tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/34549
2020-06-03T13:25:02Z
2020-06-07T08:59:01Z
null
2020-06-07T08:59:19Z
DEPR: Deprecate tshift and integrate it to shift
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 5351c3ee6b624..648d93a45d210 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -516,7 +516,7 @@ The ``DatetimeIndex`` class contains many time series related optimizations: * A large range of dates for various offsets are pre-computed and cached under the hood in order to make generating subsequent date ranges very fast (just have to grab a slice). -* Fast shifting using the ``shift`` and ``tshift`` method on pandas objects. +* Fast shifting using the ``shift`` method on pandas objects. * Unioning of overlapping ``DatetimeIndex`` objects with the same frequency is very fast (important for fast data alignment). * Quick access to date fields via properties such as ``year``, ``month``, etc. @@ -1462,23 +1462,19 @@ the pandas objects. The ``shift`` method accepts an ``freq`` argument which can accept a ``DateOffset`` class or other ``timedelta``-like object or also an -:ref:`offset alias <timeseries.offset_aliases>`: +:ref:`offset alias <timeseries.offset_aliases>`. + +When ``freq`` is specified, ``shift`` method changes all the dates in the index +rather than changing the alignment of the data and the index: .. ipython:: python + ts.shift(5, freq='D') ts.shift(5, freq=pd.offsets.BDay()) ts.shift(5, freq='BM') -Rather than changing the alignment of the data and the index, ``DataFrame`` and -``Series`` objects also have a :meth:`~Series.tshift` convenience method that -changes all the dates in the index by a specified number of offsets: - -.. ipython:: python - - ts.tshift(5, freq='D') - -Note that with ``tshift``, the leading entry is no longer NaN because the data -is not being realigned. +Note that with when ``freq`` is specified, the leading entry is no longer NaN +because the data is not being realigned. 
Frequency conversion ~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 5f8668f85c3b3..3f25baa6b7fb4 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -748,6 +748,7 @@ Deprecations - :meth:`DatetimeIndex.week` and `DatetimeIndex.weekofyear` are deprecated and will be removed in a future version, use :meth:`DatetimeIndex.isocalendar().week` instead (:issue:`33595`) - :meth:`DatetimeArray.week` and `DatetimeArray.weekofyear` are deprecated and will be removed in a future version, use :meth:`DatetimeArray.isocalendar().week` instead (:issue:`33595`) - :meth:`DateOffset.__call__` is deprecated and will be removed in a future version, use ``offset + other`` instead (:issue:`34171`) +- :meth:`DataFrame.tshift` and :meth:`Series.tshift` are deprecated and will be removed in a future version, use :meth:`DataFrame.shift` and :meth:`Series.shift` instead (:issue:`11631`) - Indexing an :class:`Index` object with a float key is deprecated, and will raise an ``IndexError`` in the future. You can manually convert to an integer key instead (:issue:`34191`). diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9dcdcaca2f689..7c3e975c889e1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -182,7 +182,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin): ] _internal_names_set: Set[str] = set(_internal_names) _accessors: Set[str] = set() - _deprecations: FrozenSet[str] = frozenset(["get_values"]) + _deprecations: FrozenSet[str] = frozenset(["get_values", "tshift"]) _metadata: List[str] = [] _is_copy = None _mgr: BlockManager @@ -9162,7 +9162,9 @@ def shift( When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be - increased using the periods and the `freq`. + increased using the periods and the `freq`. 
`freq` can be inferred + when specified as "infer" as long as either freq or inferred_freq + attribute is set in the index. Parameters ---------- @@ -9173,6 +9175,9 @@ def shift( If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. + If `freq` is specified as "infer" then it will be inferred from + the freq or inferred_freq attributes of the index. If neither of + those attributes exist, a ValueError is thrown axis : {{0 or 'index', 1 or 'columns', None}}, default None Shift direction. fill_value : object, optional @@ -9182,7 +9187,7 @@ def shift( For datetime, timedelta, or period data, etc. :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. - .. versionchanged:: 0.24.0 + .. versionchanged:: 1.1.0 Returns ------- @@ -9199,46 +9204,99 @@ def shift( Examples -------- - >>> df = pd.DataFrame({{'Col1': [10, 20, 15, 30, 45], - ... 'Col2': [13, 23, 18, 33, 48], - ... 'Col3': [17, 27, 22, 37, 52]}}) + >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45], + ... "Col2": [13, 23, 18, 33, 48], + ... "Col3": [17, 27, 22, 37, 52]}}, + ... 
index=pd.date_range("2020-01-01", "2020-01-05")) + >>> df + Col1 Col2 Col3 + 2020-01-01 10 13 17 + 2020-01-02 20 23 27 + 2020-01-03 15 18 22 + 2020-01-04 30 33 37 + 2020-01-05 45 48 52 >>> df.shift(periods=3) - Col1 Col2 Col3 - 0 NaN NaN NaN - 1 NaN NaN NaN - 2 NaN NaN NaN - 3 10.0 13.0 17.0 - 4 20.0 23.0 27.0 - - >>> df.shift(periods=1, axis='columns') - Col1 Col2 Col3 - 0 NaN 10.0 13.0 - 1 NaN 20.0 23.0 - 2 NaN 15.0 18.0 - 3 NaN 30.0 33.0 - 4 NaN 45.0 48.0 + Col1 Col2 Col3 + 2020-01-01 NaN NaN NaN + 2020-01-02 NaN NaN NaN + 2020-01-03 NaN NaN NaN + 2020-01-04 10.0 13.0 17.0 + 2020-01-05 20.0 23.0 27.0 + + >>> df.shift(periods=1, axis="columns") + Col1 Col2 Col3 + 2020-01-01 NaN 10.0 13.0 + 2020-01-02 NaN 20.0 23.0 + 2020-01-03 NaN 15.0 18.0 + 2020-01-04 NaN 30.0 33.0 + 2020-01-05 NaN 45.0 48.0 >>> df.shift(periods=3, fill_value=0) - Col1 Col2 Col3 - 0 0 0 0 - 1 0 0 0 - 2 0 0 0 - 3 10 13 17 - 4 20 23 27 + Col1 Col2 Col3 + 2020-01-01 0 0 0 + 2020-01-02 0 0 0 + 2020-01-03 0 0 0 + 2020-01-04 10 13 17 + 2020-01-05 20 23 27 + + >>> df.shift(periods=3, freq="D") + Col1 Col2 Col3 + 2020-01-04 10 13 17 + 2020-01-05 20 23 27 + 2020-01-06 15 18 22 + 2020-01-07 30 33 37 + 2020-01-08 45 48 52 + + >>> df.shift(periods=3, freq="infer") + Col1 Col2 Col3 + 2020-01-04 10 13 17 + 2020-01-05 20 23 27 + 2020-01-06 15 18 22 + 2020-01-07 30 33 37 + 2020-01-08 45 48 52 """ if periods == 0: return self.copy() - block_axis = self._get_block_manager_axis(axis) if freq is None: + # when freq is None, data is shifted, index is not + block_axis = self._get_block_manager_axis(axis) new_data = self._mgr.shift( periods=periods, axis=block_axis, fill_value=fill_value ) + return self._constructor(new_data).__finalize__(self, method="shift") + + # when freq is given, index is shifted, data is not + index = self._get_axis(axis) + + if freq == "infer": + freq = getattr(index, "freq", None) + + if freq is None: + freq = getattr(index, "inferred_freq", None) + + if freq is None: + msg = "Freq was not 
set in the index hence cannot be inferred" + raise ValueError(msg) + + elif isinstance(freq, str): + freq = to_offset(freq) + + if isinstance(index, PeriodIndex): + orig_freq = to_offset(index.freq) + if freq != orig_freq: + assert orig_freq is not None # for mypy + raise ValueError( + f"Given freq {freq.rule_code} does not match " + f"PeriodIndex freq {orig_freq.rule_code}" + ) + new_ax = index.shift(periods) else: - return self.tshift(periods, freq) + new_ax = index.shift(periods, freq) - return self._constructor(new_data).__finalize__(self, method="shift") + result = self.set_axis(new_ax, axis) + return result.__finalize__(self, method="shift") def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries: """ @@ -9283,6 +9341,9 @@ def tshift( """ Shift the time index, using the index's frequency if available. + .. deprecated:: 1.1.0 + Use `shift` instead. + Parameters ---------- periods : int @@ -9303,39 +9364,19 @@ def tshift( attributes of the index. If neither of those attributes exist, a ValueError is thrown """ - index = self._get_axis(axis) - if freq is None: - freq = getattr(index, "freq", None) - - if freq is None: - freq = getattr(index, "inferred_freq", None) + warnings.warn( + ( + "tshift is deprecated and will be removed in a future version. " + "Please use shift instead." 
+ ), + FutureWarning, + stacklevel=2, + ) if freq is None: - msg = "Freq was not given and was not set in the index" - raise ValueError(msg) - - if periods == 0: - return self - - if isinstance(freq, str): - freq = to_offset(freq) - - axis = self._get_axis_number(axis) - if isinstance(index, PeriodIndex): - orig_freq = to_offset(index.freq) - if freq != orig_freq: - assert orig_freq is not None # for mypy - raise ValueError( - f"Given freq {freq.rule_code} does not match " - f"PeriodIndex freq {orig_freq.rule_code}" - ) - new_ax = index.shift(periods) - else: - new_ax = index.shift(periods, freq) + freq = "infer" - result = self.copy() - result.set_axis(new_ax, axis, inplace=True) - return result.__finalize__(self, method="tshift") + return self.shift(periods, freq, axis) def truncate( self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py index 95f9fd9d7caf3..9ec029a6c4304 100644 --- a/pandas/tests/frame/methods/test_shift.py +++ b/pandas/tests/frame/methods/test_shift.py @@ -145,7 +145,10 @@ def test_shift_duplicate_columns(self): tm.assert_frame_equal(shifted[0], shifted[1]) tm.assert_frame_equal(shifted[0], shifted[2]) + @pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning") def test_tshift(self, datetime_frame): + # TODO: remove this test when tshift deprecation is enforced + # PeriodIndex ps = tm.makePeriodFrame() shifted = ps.tshift(1) @@ -159,7 +162,8 @@ def test_tshift(self, datetime_frame): shifted3 = ps.tshift(freq=offsets.BDay()) tm.assert_frame_equal(shifted, shifted3) - with pytest.raises(ValueError, match="does not match"): + msg = "Given freq M does not match PeriodIndex freq B" + with pytest.raises(ValueError, match=msg): ps.tshift(freq="M") # DatetimeIndex @@ -186,10 +190,61 @@ def test_tshift(self, datetime_frame): tm.assert_frame_equal(unshifted, inferred_ts) no_freq = datetime_frame.iloc[[0, 5, 7], :] - msg = 
"Freq was not given and was not set in the index" + msg = "Freq was not set in the index hence cannot be inferred" with pytest.raises(ValueError, match=msg): no_freq.tshift() + def test_tshift_deprecated(self, datetime_frame): + # GH#11631 + with tm.assert_produces_warning(FutureWarning): + datetime_frame.tshift() + + def test_period_index_frame_shift_with_freq(self): + ps = tm.makePeriodFrame() + + shifted = ps.shift(1, freq="infer") + unshifted = shifted.shift(-1, freq="infer") + tm.assert_frame_equal(unshifted, ps) + + shifted2 = ps.shift(freq="B") + tm.assert_frame_equal(shifted, shifted2) + + shifted3 = ps.shift(freq=offsets.BDay()) + tm.assert_frame_equal(shifted, shifted3) + + def test_datetime_frame_shift_with_freq(self, datetime_frame): + shifted = datetime_frame.shift(1, freq="infer") + unshifted = shifted.shift(-1, freq="infer") + tm.assert_frame_equal(datetime_frame, unshifted) + + shifted2 = datetime_frame.shift(freq=datetime_frame.index.freq) + tm.assert_frame_equal(shifted, shifted2) + + inferred_ts = DataFrame( + datetime_frame.values, + Index(np.asarray(datetime_frame.index)), + columns=datetime_frame.columns, + ) + shifted = inferred_ts.shift(1, freq="infer") + expected = datetime_frame.shift(1, freq="infer") + expected.index = expected.index._with_freq(None) + tm.assert_frame_equal(shifted, expected) + + unshifted = shifted.shift(-1, freq="infer") + tm.assert_frame_equal(unshifted, inferred_ts) + + def test_period_index_frame_shift_with_freq_error(self): + ps = tm.makePeriodFrame() + msg = "Given freq M does not match PeriodIndex freq B" + with pytest.raises(ValueError, match=msg): + ps.shift(freq="M") + + def test_datetime_frame_shift_with_freq_error(self, datetime_frame): + no_freq = datetime_frame.iloc[[0, 5, 7], :] + msg = "Freq was not set in the index hence cannot be inferred" + with pytest.raises(ValueError, match=msg): + no_freq.shift(freq="infer") + def test_shift_dt64values_int_fill_deprecated(self): # GH#31971 ser = 
pd.Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")]) diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py index d307eef8beb62..a152bc203721f 100644 --- a/pandas/tests/generic/test_finalize.py +++ b/pandas/tests/generic/test_finalize.py @@ -438,11 +438,21 @@ (pd.DataFrame, frame_data, operator.methodcaller("mask", np.array([[True]]))), (pd.Series, ([1, 2],), operator.methodcaller("slice_shift")), (pd.DataFrame, frame_data, operator.methodcaller("slice_shift")), - (pd.Series, (1, pd.date_range("2000", periods=4)), operator.methodcaller("tshift")), - ( - pd.DataFrame, - ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), - operator.methodcaller("tshift"), + pytest.param( + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("tshift"), + ), + marks=pytest.mark.filterwarnings("ignore::FutureWarning"), + ), + pytest.param( + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("tshift"), + ), + marks=pytest.mark.filterwarnings("ignore::FutureWarning"), ), (pd.Series, ([1, 2],), operator.methodcaller("truncate", before=0)), (pd.DataFrame, frame_data, operator.methodcaller("truncate", before=0)), diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 80f34bb91cdfd..9cb7e4acfbf2a 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1979,6 +1979,7 @@ def test_bool_aggs_dup_column_labels(bool_agg_func): @pytest.mark.parametrize( "idx", [pd.Index(["a", "a"]), pd.MultiIndex.from_tuples((("a", "a"), ("a", "a")))] ) +@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning") def test_dup_labels_output_shape(groupby_func, idx): if groupby_func in {"size", "ngroup", "cumcount"}: pytest.skip("Not applicable") diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py index 6adae19005c3a..7271911c5f80f 100644 
--- a/pandas/tests/groupby/test_groupby_subclass.py +++ b/pandas/tests/groupby/test_groupby_subclass.py @@ -14,6 +14,7 @@ tm.SubclassedSeries(np.arange(0, 10), name="A"), ], ) +@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning") def test_groupby_preserves_subclass(obj, groupby_func): # GH28330 -- preserve subclass through groupby operations diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py index 1598cc24ba6fb..9b595328d9230 100644 --- a/pandas/tests/groupby/test_whitelist.py +++ b/pandas/tests/groupby/test_whitelist.py @@ -340,6 +340,7 @@ def test_groupby_function_rename(mframe): assert f.__name__ == name +@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning") def test_groupby_selection_with_methods(df): # some methods which require DatetimeIndex rng = date_range("2014", periods=len(df)) diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 43d2bf80505db..e7637a598403f 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -1067,7 +1067,7 @@ def test_resample_anchored_intraday(simple_date_range_series): tm.assert_frame_equal(result, expected) result = df.resample("M", closed="left").mean() - exp = df.tshift(1, freq="D").resample("M", kind="period").mean() + exp = df.shift(1, freq="D").resample("M", kind="period").mean() exp = exp.to_timestamp(how="end") exp.index = exp.index + Timedelta(1, "ns") - Timedelta(1, "D") @@ -1086,7 +1086,7 @@ def test_resample_anchored_intraday(simple_date_range_series): tm.assert_frame_equal(result, expected) result = df.resample("Q", closed="left").mean() - expected = df.tshift(1, freq="D").resample("Q", kind="period", closed="left").mean() + expected = df.shift(1, freq="D").resample("Q", kind="period", closed="left").mean() expected = expected.to_timestamp(how="end") expected.index += Timedelta(1, "ns") - Timedelta(1, "D") 
expected.index._data.freq = "Q" diff --git a/pandas/tests/series/methods/test_shift.py b/pandas/tests/series/methods/test_shift.py index f981e98100d31..6257eecf4fc08 100644 --- a/pandas/tests/series/methods/test_shift.py +++ b/pandas/tests/series/methods/test_shift.py @@ -181,7 +181,10 @@ def test_shift_dst(self): tm.assert_series_equal(res, exp) assert res.dtype == "datetime64[ns, US/Eastern]" + @pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning") def test_tshift(self, datetime_series): + # TODO: remove this test when tshift deprecation is enforced + # PeriodIndex ps = tm.makePeriodSeries() shifted = ps.tshift(1) @@ -220,10 +223,59 @@ def test_tshift(self, datetime_series): tm.assert_series_equal(unshifted, inferred_ts) no_freq = datetime_series[[0, 5, 7]] - msg = "Freq was not given and was not set in the index" + msg = "Freq was not set in the index hence cannot be inferred" with pytest.raises(ValueError, match=msg): no_freq.tshift() + def test_tshift_deprecated(self, datetime_series): + # GH#11631 + with tm.assert_produces_warning(FutureWarning): + datetime_series.tshift() + + def test_period_index_series_shift_with_freq(self): + ps = tm.makePeriodSeries() + + shifted = ps.shift(1, freq="infer") + unshifted = shifted.shift(-1, freq="infer") + tm.assert_series_equal(unshifted, ps) + + shifted2 = ps.shift(freq="B") + tm.assert_series_equal(shifted, shifted2) + + shifted3 = ps.shift(freq=BDay()) + tm.assert_series_equal(shifted, shifted3) + + def test_datetime_series_shift_with_freq(self, datetime_series): + shifted = datetime_series.shift(1, freq="infer") + unshifted = shifted.shift(-1, freq="infer") + tm.assert_series_equal(datetime_series, unshifted) + + shifted2 = datetime_series.shift(freq=datetime_series.index.freq) + tm.assert_series_equal(shifted, shifted2) + + inferred_ts = Series( + datetime_series.values, Index(np.asarray(datetime_series.index)), name="ts" + ) + shifted = inferred_ts.shift(1, freq="infer") + expected = 
datetime_series.shift(1, freq="infer") + expected.index = expected.index._with_freq(None) + tm.assert_series_equal(shifted, expected) + + unshifted = shifted.shift(-1, freq="infer") + tm.assert_series_equal(unshifted, inferred_ts) + + def test_period_index_series_shift_with_freq_error(self): + ps = tm.makePeriodSeries() + msg = "Given freq M does not match PeriodIndex freq B" + with pytest.raises(ValueError, match=msg): + ps.shift(freq="M") + + def test_datetime_series_shift_with_freq_error(self, datetime_series): + no_freq = datetime_series[[0, 5, 7]] + msg = "Freq was not set in the index hence cannot be inferred" + with pytest.raises(ValueError, match=msg): + no_freq.shift(freq="infer") + def test_shift_int(self, datetime_series): ts = datetime_series.astype(int) shifted = ts.shift(1)
- [x] closes #11631 - xref #34452 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34545
2020-06-03T10:41:20Z
2020-06-15T13:20:34Z
2020-06-15T13:20:34Z
2021-03-20T02:35:51Z
REF: avoid use of to_perioddelta
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 33b478c4d8da4..0deaf082dd1c7 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -36,7 +36,7 @@ from pandas._libs.tslibs.base cimport ABCTimestamp from pandas._libs.tslibs.ccalendar import ( MONTH_ALIASES, MONTH_TO_CAL_NUM, weekday_to_int, int_to_weekday, ) -from pandas._libs.tslibs.ccalendar cimport get_days_in_month, dayofweek +from pandas._libs.tslibs.ccalendar cimport DAY_NANOS, get_days_in_month, dayofweek from pandas._libs.tslibs.conversion cimport ( convert_datetime_to_tsobject, localize_pydatetime, @@ -1067,11 +1067,7 @@ cdef class RelativeDeltaOffset(BaseOffset): weeks = kwds.get("weeks", 0) * self.n if weeks: - # integer addition on PeriodIndex is deprecated, - # so we directly use _time_shift instead - asper = index.to_period("W") - shifted = asper._time_shift(weeks) - index = shifted.to_timestamp() + index.to_perioddelta("W") + index = index + timedelta(days=7 * weeks) timedelta_kwds = { k: v @@ -1383,7 +1379,9 @@ cdef class BusinessDay(BusinessMixin): @apply_index_wraps def apply_index(self, dtindex): - time = dtindex.to_perioddelta("D") + i8other = dtindex.asi8 + time = (i8other % DAY_NANOS).view("timedelta64[ns]") + # to_period rolls forward to next BDay; track and # reduce n where it does when rolling forward asper = dtindex.to_period("B") @@ -2276,6 +2274,7 @@ cdef class SemiMonthOffset(SingleConstructorOffset): from pandas import Timedelta dti = dtindex + i8other = dtindex.asi8 days_from_start = dtindex.to_perioddelta("M").asi8 delta = Timedelta(days=self.day_of_month - 1).value @@ -2289,7 +2288,7 @@ cdef class SemiMonthOffset(SingleConstructorOffset): roll = self._get_roll(dtindex, before_day_of_month, after_day_of_month) # isolate the time since it will be striped away one the next line - time = dtindex.to_perioddelta("D") + time = (i8other % DAY_NANOS).view("timedelta64[ns]") # apply the correct number of months @@ -2506,10 
+2505,9 @@ cdef class Week(SingleConstructorOffset): if self.weekday is None: # integer addition on PeriodIndex is deprecated, # so we use _time_shift directly - asper = dtindex.to_period("W") - - shifted = asper._time_shift(self.n) - return shifted.to_timestamp() + dtindex.to_perioddelta("W") + td = timedelta(days=7 * self.n) + td64 = np.timedelta64(td, "ns") + return dtindex + td64 else: return self._end_apply_index(dtindex) @@ -2529,7 +2527,8 @@ cdef class Week(SingleConstructorOffset): from pandas import Timedelta from .frequencies import get_freq_code # TODO: avoid circular import - off = dtindex.to_perioddelta("D") + i8other = dtindex.asi8 + off = (i8other % DAY_NANOS).view("timedelta64") base, mult = get_freq_code(self.freqstr) base_period = dtindex.to_period(base)
tslibs is nominally self-contained, but several offsets methods still rely on DatetimeArray/PeriodArray behavior. This is the first of several steps whittling that away.
https://api.github.com/repos/pandas-dev/pandas/pulls/34539
2020-06-02T22:33:17Z
2020-06-03T18:03:30Z
2020-06-03T18:03:30Z
2021-11-20T23:22:30Z
REF: add to_offset to tslibs namespace
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 965adc82df676..25e2d8ba477e0 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -14,12 +14,14 @@ "ints_to_pytimedelta", "Timestamp", "tz_convert_single", + "to_offset", ] from .conversion import localize_pydatetime from .nattype import NaT, NaTType, iNaT, is_null_datetimelike, nat_strings from .np_datetime import OutOfBoundsDatetime +from .offsets import to_offset from .period import IncompatibleFrequency, Period from .resolution import Resolution from .timedeltas import Timedelta, delta_to_nanoseconds, ints_to_pytimedelta diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index da8e9b4bfdd4e..e2ecb6c343b7a 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -14,6 +14,7 @@ Timestamp, delta_to_nanoseconds, iNaT, + to_offset, ) from pandas._libs.tslibs.timestamps import ( RoundTo, @@ -427,7 +428,7 @@ def _with_freq(self, freq): else: # As an internal method, we can ensure this assertion always holds assert freq == "infer" - freq = frequencies.to_offset(self.inferred_freq) + freq = to_offset(self.inferred_freq) arr = self.view() arr._freq = freq @@ -1081,7 +1082,7 @@ def freq(self): @freq.setter def freq(self, value): if value is not None: - value = frequencies.to_offset(value) + value = to_offset(value) self._validate_frequency(self, value) self._freq = value @@ -1367,7 +1368,7 @@ def _time_shift(self, periods, freq=None): """ if freq is not None and freq != self.freq: if isinstance(freq, str): - freq = frequencies.to_offset(freq) + freq = to_offset(freq) offset = periods * freq result = self + offset return result @@ -1779,7 +1780,7 @@ def maybe_infer_freq(freq): if not isinstance(freq, DateOffset): # if a passed freq is None, don't infer automatically if freq != "infer": - freq = frequencies.to_offset(freq) + freq = to_offset(freq) else: freq_infer = True freq = None diff 
--git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index f11f3ad974b37..8e4ae339ae53e 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -15,6 +15,7 @@ iNaT, resolution as libresolution, timezones, + to_offset, tzconversion, ) from pandas.errors import PerformanceWarning @@ -46,7 +47,7 @@ from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com -from pandas.tseries.frequencies import get_period_alias, to_offset +from pandas.tseries.frequencies import get_period_alias from pandas.tseries.offsets import BDay, Day, Tick _midnight = time(0, 0) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 3d4b42de01810..1b8a0b2780a7d 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -7,9 +7,12 @@ from pandas._libs.tslibs import ( NaT, NaTType, + Timedelta, + delta_to_nanoseconds, frequencies as libfrequencies, iNaT, period as libperiod, + to_offset, ) from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import Tick, delta_to_tick @@ -20,7 +23,6 @@ get_period_field_arr, period_asfreq_arr, ) -from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds from pandas._typing import AnyArrayLike from pandas.util._decorators import cache_readonly @@ -45,7 +47,6 @@ from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com -from pandas.tseries import frequencies from pandas.tseries.offsets import DateOffset @@ -902,7 +903,7 @@ def validate_dtype_freq(dtype, freq): IncompatibleFrequency : mismatch between dtype and freq """ if freq is not None: - freq = frequencies.to_offset(freq) + freq = to_offset(freq) if dtype is not None: dtype = pandas_dtype(dtype) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 4de105e8be364..f439f07790274 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -4,7 +4,7 @@ 
import numpy as np from pandas._libs import lib, tslibs -from pandas._libs.tslibs import NaT, Period, Timedelta, Timestamp, iNaT +from pandas._libs.tslibs import NaT, Period, Timedelta, Timestamp, iNaT, to_offset from pandas._libs.tslibs.conversion import precision_from_unit from pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import array_to_timedelta64, parse_timedelta_unit @@ -35,7 +35,6 @@ from pandas.core.construction import extract_array from pandas.core.ops.common import unpack_zerodim_and_defer -from pandas.tseries.frequencies import to_offset from pandas.tseries.offsets import Tick diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index ff35876ab2e73..84284c581c9e5 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -20,7 +20,7 @@ import pytz from pandas._libs.interval import Interval -from pandas._libs.tslibs import NaT, Period, Timestamp, timezones +from pandas._libs.tslibs import NaT, Period, Timestamp, timezones, to_offset from pandas._libs.tslibs.offsets import BaseOffset from pandas._typing import DtypeObj, Ordered @@ -925,7 +925,6 @@ def _parse_dtype_strict(cls, freq): m = cls._match.search(freq) if m is not None: freq = m.group("freq") - from pandas.tseries.frequencies import to_offset freq = to_offset(freq) if freq is not None: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0260f30b9e7e2..e5f5cb232fdd1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -30,7 +30,8 @@ from pandas._config import config -from pandas._libs import Timestamp, lib +from pandas._libs import lib +from pandas._libs.tslibs import Timestamp, to_offset from pandas._typing import ( Axis, FilePathOrBuffer, @@ -106,7 +107,6 @@ from pandas.io.formats import format as fmt from pandas.io.formats.format import DataFrameFormatter, format_percentiles from pandas.io.formats.printing import pprint_thing -from pandas.tseries.frequencies import to_offset from 
pandas.tseries.offsets import Tick if TYPE_CHECKING: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 4677faa6b7d24..68c55426294ef 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -6,7 +6,7 @@ import numpy as np from pandas._libs import NaT, Period, Timestamp, index as libindex, lib, tslib -from pandas._libs.tslibs import Resolution, fields, parsing, timezones +from pandas._libs.tslibs import Resolution, fields, parsing, timezones, to_offset from pandas._libs.tslibs.frequencies import get_freq_group from pandas._libs.tslibs.offsets import prefix_mapping from pandas._typing import DtypeObj, Label @@ -30,8 +30,6 @@ from pandas.core.indexes.extension import inherit_names from pandas.core.tools.times import to_time -from pandas.tseries.frequencies import to_offset - def _new_DatetimeIndex(cls, d): """ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 7d7572973707c..1a59e066879cc 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -7,8 +7,9 @@ from pandas._config import get_option -from pandas._libs import Timedelta, Timestamp, lib +from pandas._libs import lib from pandas._libs.interval import Interval, IntervalMixin, IntervalTree +from pandas._libs.tslibs import Timedelta, Timestamp, to_offset from pandas._typing import AnyArrayLike, Label from pandas.util._decorators import Appender, Substitution, cache_readonly from pandas.util._exceptions import rewrite_exception @@ -55,7 +56,6 @@ from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range from pandas.core.ops import get_op_result_name -from pandas.tseries.frequencies import to_offset from pandas.tseries.offsets import DateOffset _VALID_CLOSED = {"left", "right", "both", "neither"} diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 184fae4b97416..ce3ff17814a25 100644 --- a/pandas/core/indexes/timedeltas.py +++ 
b/pandas/core/indexes/timedeltas.py @@ -1,6 +1,7 @@ """ implement the TimedeltaIndex """ -from pandas._libs import Timedelta, index as libindex, lib +from pandas._libs import index as libindex, lib +from pandas._libs.tslibs import Timedelta, to_offset from pandas._typing import DtypeObj, Label from pandas.util._decorators import doc @@ -24,8 +25,6 @@ ) from pandas.core.indexes.extension import inherit_names -from pandas.tseries.frequencies import to_offset - @inherit_names( ["__neg__", "__pos__", "__abs__", "total_seconds", "round", "floor", "ceil"] diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 5df80645c2b5d..32e947dc414d2 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -6,8 +6,14 @@ import numpy as np from pandas._libs import lib -from pandas._libs.tslibs import NaT, Period, Timedelta, Timestamp -from pandas._libs.tslibs.period import IncompatibleFrequency +from pandas._libs.tslibs import ( + IncompatibleFrequency, + NaT, + Period, + Timedelta, + Timestamp, + to_offset, +) from pandas._typing import TimedeltaConvertibleTypes, TimestampConvertibleTypes from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError @@ -28,7 +34,7 @@ from pandas.core.indexes.period import PeriodIndex, period_range from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range -from pandas.tseries.frequencies import is_subperiod, is_superperiod, to_offset +from pandas.tseries.frequencies import is_subperiod, is_superperiod from pandas.tseries.offsets import DateOffset, Day, Nano, Tick _shared_docs_kwargs: Dict[str, str] = dict() diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index b06128052fa8f..92be2d056cfcb 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -10,6 +10,7 @@ import numpy as np +from pandas._libs.tslibs import to_offset import pandas._libs.window.aggregations as window_aggregations from pandas._typing import Axis, 
FrameOrSeries, Scalar from pandas.compat._optional import import_optional_dependency @@ -1977,8 +1978,6 @@ def _validate_freq(self): """ Validate & return window frequency. """ - from pandas.tseries.frequencies import to_offset - try: return to_offset(self.window) except (TypeError, ValueError) as err: diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index 631760c547985..d5a390dc34d39 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -5,8 +5,8 @@ import numpy as np +from pandas._libs.tslibs import Period, to_offset from pandas._libs.tslibs.frequencies import FreqGroup, base_and_stride, get_freq_code -from pandas._libs.tslibs.period import Period from pandas.core.dtypes.generic import ( ABCDatetimeIndex, @@ -20,12 +20,7 @@ TimeSeries_DateLocator, TimeSeries_TimedeltaFormatter, ) -from pandas.tseries.frequencies import ( - get_period_alias, - is_subperiod, - is_superperiod, - to_offset, -) +from pandas.tseries.frequencies import get_period_alias, is_subperiod, is_superperiod from pandas.tseries.offsets import DateOffset if TYPE_CHECKING: diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index d206622521816..ccd03e841a40d 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -6,17 +6,15 @@ import numpy as np import pytest -from pandas._libs.tslibs.period import IncompatibleFrequency +from pandas._libs.tslibs import IncompatibleFrequency, Period, Timestamp, to_offset from pandas.errors import PerformanceWarning import pandas as pd -from pandas import Period, PeriodIndex, Series, TimedeltaIndex, Timestamp, period_range +from pandas import PeriodIndex, Series, TimedeltaIndex, period_range import pandas._testing as tm from pandas.core import ops from pandas.core.arrays import TimedeltaArray -from pandas.tseries.frequencies import to_offset - from .common import 
assert_invalid_comparison # ------------------------------------------------------------------ diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py index e5d1277aed9cd..23dedf6f86a09 100644 --- a/pandas/tests/indexes/datetimes/test_scalar_compat.py +++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -6,14 +6,12 @@ import numpy as np import pytest -from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime +from pandas._libs.tslibs import OutOfBoundsDatetime, to_offset import pandas as pd from pandas import DatetimeIndex, Timestamp, date_range import pandas._testing as tm -from pandas.tseries.frequencies import to_offset - class TestDatetimeIndexOps: def test_dti_time(self): diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py index eb9932f9a3a97..954301b979074 100644 --- a/pandas/tests/scalar/timestamp/test_arithmetic.py +++ b/pandas/tests/scalar/timestamp/test_arithmetic.py @@ -3,14 +3,16 @@ import numpy as np import pytest -from pandas.errors import OutOfBoundsDatetime +from pandas._libs.tslibs import ( + OutOfBoundsDatetime, + Timedelta, + Timestamp, + offsets, + to_offset, +) -from pandas import Timedelta, Timestamp import pandas._testing as tm -from pandas.tseries import offsets -from pandas.tseries.frequencies import to_offset - class TestTimestampArithmetic: def test_overflow_offset(self): diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py index e657559b55d5a..388ff4ea039be 100644 --- a/pandas/tests/scalar/timestamp/test_unary_ops.py +++ b/pandas/tests/scalar/timestamp/test_unary_ops.py @@ -5,15 +5,12 @@ import pytz from pytz import utc -from pandas._libs.tslibs import conversion +from pandas._libs.tslibs import NaT, Timestamp, conversion, to_offset from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG import pandas.util._test_decorators as td 
-from pandas import NaT, Timestamp import pandas._testing as tm -from pandas.tseries.frequencies import to_offset - class TestTimestampUnaryOps: diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py index f82d225f0538c..d4eb31168b20e 100644 --- a/pandas/tests/tseries/frequencies/test_freq_code.py +++ b/pandas/tests/tseries/frequencies/test_freq_code.py @@ -1,5 +1,6 @@ import pytest +from pandas._libs.tslibs import to_offset from pandas._libs.tslibs.frequencies import ( FreqGroup, _attrname_to_abbrevs, @@ -10,7 +11,6 @@ ) from pandas._libs.tslibs.resolution import Resolution as _reso -from pandas.tseries.frequencies import to_offset import pandas.tseries.offsets as offsets diff --git a/pandas/tests/tseries/frequencies/test_to_offset.py b/pandas/tests/tseries/frequencies/test_to_offset.py index d3510eaa5c749..04be0e445a3b2 100644 --- a/pandas/tests/tseries/frequencies/test_to_offset.py +++ b/pandas/tests/tseries/frequencies/test_to_offset.py @@ -2,16 +2,13 @@ import pytest -from pandas import Timedelta - -import pandas.tseries.frequencies as frequencies -import pandas.tseries.offsets as offsets +from pandas._libs.tslibs import Timedelta, offsets, to_offset @pytest.mark.parametrize( "freq_input,expected", [ - (frequencies.to_offset("10us"), offsets.Micro(10)), + (to_offset("10us"), offsets.Micro(10)), (offsets.Hour(), offsets.Hour()), ((5, "T"), offsets.Minute(5)), ("2h30min", offsets.Minute(150)), @@ -33,7 +30,7 @@ ], ) def test_to_offset(freq_input, expected): - result = frequencies.to_offset(freq_input) + result = to_offset(freq_input) assert result == expected @@ -41,7 +38,7 @@ def test_to_offset(freq_input, expected): "freqstr,expected", [("-1S", -1), ("-2SM", -2), ("-1SMS", -1), ("-5min10s", -310)] ) def test_to_offset_negative(freqstr, expected): - result = frequencies.to_offset(freqstr) + result = to_offset(freqstr) assert result.n == expected @@ -88,12 +85,12 @@ def 
test_to_offset_invalid(freqstr): # inputs contain regex special characters. msg = re.escape(f"Invalid frequency: {freqstr}") with pytest.raises(ValueError, match=msg): - frequencies.to_offset(freqstr) + to_offset(freqstr) def test_to_offset_no_evaluate(): with pytest.raises(ValueError, match="Could not evaluate"): - frequencies.to_offset(("", "")) + to_offset(("", "")) @pytest.mark.parametrize( @@ -108,7 +105,7 @@ def test_to_offset_no_evaluate(): ], ) def test_to_offset_whitespace(freqstr, expected): - result = frequencies.to_offset(freqstr) + result = to_offset(freqstr) assert result == expected @@ -116,13 +113,13 @@ def test_to_offset_whitespace(freqstr, expected): "freqstr,expected", [("00H 00T 01S", 1), ("-00H 03T 14S", -194)] ) def test_to_offset_leading_zero(freqstr, expected): - result = frequencies.to_offset(freqstr) + result = to_offset(freqstr) assert result.n == expected @pytest.mark.parametrize("freqstr,expected", [("+1d", 1), ("+2h30min", 150)]) def test_to_offset_leading_plus(freqstr, expected): - result = frequencies.to_offset(freqstr) + result = to_offset(freqstr) assert result.n == expected @@ -135,7 +132,7 @@ def test_to_offset_leading_plus(freqstr, expected): (dict(hours=1, minutes=-10), offsets.Minute(50)), (dict(weeks=1), offsets.Day(7)), (dict(hours=1), offsets.Hour(1)), - (dict(hours=1), frequencies.to_offset("60min")), + (dict(hours=1), to_offset("60min")), (dict(microseconds=1), offsets.Micro(1)), (dict(microseconds=0), offsets.Nano(0)), ], @@ -143,7 +140,7 @@ def test_to_offset_leading_plus(freqstr, expected): def test_to_offset_pd_timedelta(kwargs, expected): # see gh-9064 td = Timedelta(**kwargs) - result = frequencies.to_offset(td) + result = to_offset(td) assert result == expected @@ -164,5 +161,5 @@ def test_to_offset_pd_timedelta(kwargs, expected): ], ) def test_anchored_shortcuts(shortcut, expected): - result = frequencies.to_offset(shortcut) + result = to_offset(shortcut) assert result == expected diff --git 
a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index 908f9fa699891..bbabfed4cb976 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py @@ -40,6 +40,7 @@ def test_namespace(): "ints_to_pytimedelta", "localize_pydatetime", "tz_convert_single", + "to_offset", ] expected = set(submodules + api)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34538
2020-06-02T21:22:59Z
2020-06-03T22:31:31Z
2020-06-03T22:31:31Z
2020-06-03T22:32:53Z
CI/TST #34131 fixed test_floordiv_axis0_numexpr_path
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index e7b7f3e524d44..39e064ae4dd5d 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -11,6 +11,7 @@ from pandas import DataFrame, MultiIndex, Series import pandas._testing as tm import pandas.core.common as com +from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int # ------------------------------------------------------------------- @@ -374,13 +375,13 @@ def test_floordiv_axis0(self): result2 = df.floordiv(ser.values, axis=0) tm.assert_frame_equal(result2, expected) - @pytest.mark.slow + @pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed") @pytest.mark.parametrize("opname", ["floordiv", "pow"]) def test_floordiv_axis0_numexpr_path(self, opname): # case that goes through numexpr and has to fall back to masked_arith_op op = getattr(operator, opname) - arr = np.arange(10 ** 6).reshape(100, -1) + arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100 df = pd.DataFrame(arr) df["C"] = 1.0
xref #34131 - [ x ] passes `black pandas` - [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Test `/pandas/tests/frame/test_floordiv_axis0_numexpr_path` is slow. It builds a df with 10^6 elements to tests functions `floordiv` and `pow`. The number of elements can be reduced with keeping the same min/max values inside the dataframe. Before/after timeit comparison: ``` 26.6 s ± 1.04 s per loop (mean ± std. dev. of 7 runs, 1 loop each) 37.6 ms ± 3.45 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/34537
2020-06-02T21:04:50Z
2020-06-14T20:46:34Z
2020-06-14T20:46:33Z
2021-01-02T08:31:59Z
DOC: remove an extra colon
diff --git a/doc/source/user_guide/gotchas.rst b/doc/source/user_guide/gotchas.rst index e0f6c7570074b..a96c70405d859 100644 --- a/doc/source/user_guide/gotchas.rst +++ b/doc/source/user_guide/gotchas.rst @@ -321,7 +321,7 @@ Byte-ordering issues -------------------- Occasionally you may have to deal with data that were created on a machine with a different byte order than the one on which you are running Python. A common -symptom of this issue is an error like::: +symptom of this issue is an error like:: Traceback ...
fix a typo by removing an extra colon - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34533
2020-06-02T14:47:48Z
2020-06-02T19:56:26Z
2020-06-02T19:56:26Z
2020-06-02T21:30:44Z
CLN: remove Resolution.get_attrname_from_abbrev
diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 91059638cd73a..c0baabdc98acd 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -41,18 +41,6 @@ _reso_str_map = { _str_reso_map = {v: k for k, v in _reso_str_map.items()} -# factor to multiply a value by to convert it to the next finer grained -# resolution -_reso_mult_map = { - RESO_NS: None, - RESO_US: 1000, - RESO_MS: 1000, - RESO_SEC: 1000, - RESO_MIN: 60, - RESO_HR: 60, - RESO_DAY: 24, -} - # ---------------------------------------------------------------------- @@ -145,17 +133,17 @@ class Resolution(Enum): def __ge__(self, other): return self.value >= other.value - @classmethod - def get_str(cls, reso: "Resolution") -> str: + @property + def attrname(self) -> str: """ - Return resolution str against resolution code. + Return datetime attribute name corresponding to this Resolution. Examples -------- - >>> Resolution.get_str(Resolution.RESO_SEC) + >>> Resolution.RESO_SEC.attrname 'second' """ - return _reso_str_map[reso.value] + return _reso_str_map[self.value] @classmethod def from_attrname(cls, attrname: str) -> "Resolution": @@ -172,18 +160,6 @@ class Resolution(Enum): """ return cls(_str_reso_map[attrname]) - @classmethod - def get_attrname_from_abbrev(cls, freq: str) -> str: - """ - Return resolution str against frequency str. 
- - Examples - -------- - >>> Resolution.get_attrname_from_abbrev('H') - 'hour' - """ - return _abbrev_to_attrnames[freq] - @classmethod def get_reso_from_freq(cls, freq: str) -> "Resolution": """ @@ -199,7 +175,8 @@ class Resolution(Enum): >>> Resolution.get_reso_from_freq('H') == Resolution.RESO_HR True """ - return cls.from_attrname(cls.get_attrname_from_abbrev(freq)) + attr_name = _abbrev_to_attrnames[freq] + return cls.from_attrname(attr_name) # ---------------------------------------------------------------------- diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 55420181190aa..da8e9b4bfdd4e 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1126,7 +1126,7 @@ def resolution(self) -> str: # somewhere in the past it was decided we default to day return "day" # otherwise we fall through and will raise - return Resolution.get_str(self._resolution_obj) + return self._resolution_obj.attrname # type: ignore @classmethod def _validate_frequency(cls, index, freq, **kwargs): diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py index 133db0c3d611b..f82d225f0538c 100644 --- a/pandas/tests/tseries/frequencies/test_freq_code.py +++ b/pandas/tests/tseries/frequencies/test_freq_code.py @@ -93,9 +93,6 @@ def test_get_to_timestamp_base(freqstr, exp_freqstr): @pytest.mark.parametrize( "freqstr,expected", [ - ("A", "year"), - ("Q", "quarter"), - ("M", "month"), ("D", "day"), ("H", "hour"), ("T", "minute"), @@ -106,19 +103,20 @@ def test_get_to_timestamp_base(freqstr, exp_freqstr): ], ) def test_get_attrname_from_abbrev(freqstr, expected): - assert _reso.get_attrname_from_abbrev(freqstr) == expected + assert _reso.get_reso_from_freq(freqstr).attrname == expected -@pytest.mark.parametrize("freq", ["A", "Q", "M", "D", "H", "T", "S", "L", "U", "N"]) -def test_get_freq_roundtrip(freq): - result = 
_attrname_to_abbrevs[_reso.get_attrname_from_abbrev(freq)] - assert freq == result +@pytest.mark.parametrize("freq", ["A", "Q", "M"]) +def test_get_freq_unsupported_(freq): + # Lowest-frequency resolution is for Day + with pytest.raises(KeyError, match=freq.lower()): + _reso.get_reso_from_freq(freq) -@pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U"]) +@pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U", "N"]) def test_get_freq_roundtrip2(freq): obj = _reso.get_reso_from_freq(freq) - result = _attrname_to_abbrevs[_reso.get_str(obj)] + result = _attrname_to_abbrevs[obj.attrname] assert freq == result
rename get_str -> attrname, make it a property instead of a classmethod
https://api.github.com/repos/pandas-dev/pandas/pulls/34524
2020-06-02T03:05:17Z
2020-06-02T13:03:01Z
2020-06-02T13:03:01Z
2020-06-02T14:39:40Z
CLN: stronger typing for Period.freq
diff --git a/pandas/_libs/tslibs/offsets.pxd b/pandas/_libs/tslibs/offsets.pxd index 69b878c77f0b8..2b8ad97b83917 100644 --- a/pandas/_libs/tslibs/offsets.pxd +++ b/pandas/_libs/tslibs/offsets.pxd @@ -1,3 +1,11 @@ +from numpy cimport int64_t + cpdef to_offset(object obj) cdef bint is_offset_object(object obj) cdef bint is_tick_object(object obj) + +cdef class BaseOffset: + cdef readonly: + int64_t n + bint normalize + dict _cache diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 9e6a2c0507f7f..33b478c4d8da4 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -366,10 +366,10 @@ cdef class BaseOffset: _adjust_dst = True _deprecations = frozenset(["isAnchored", "onOffset"]) - cdef readonly: - int64_t n - bint normalize - dict _cache + # cdef readonly: + # int64_t n + # bint normalize + # dict _cache def __init__(self, n=1, normalize=False): n = self._validate_n(n) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index bc190825214c1..30c5dd1a881ea 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -69,7 +69,10 @@ from pandas._libs.tslibs.nattype cimport ( c_nat_strings as nat_strings, ) from pandas._libs.tslibs.offsets cimport ( - to_offset, is_tick_object, is_offset_object, + BaseOffset, + to_offset, + is_tick_object, + is_offset_object, ) from pandas._libs.tslibs.tzconversion cimport tz_convert_utc_to_tzlocal @@ -1509,9 +1512,9 @@ cdef class _Period: cdef readonly: int64_t ordinal - object freq + BaseOffset freq - def __cinit__(self, ordinal, freq): + def __cinit__(self, int64_t ordinal, BaseOffset freq): self.ordinal = ordinal self.freq = freq diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index f7f8b86359732..10c1a56a2eb4e 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1205,7 +1205,7 @@ class Timedelta(_Timedelta): cdef: int64_t result, unit - from 
pandas.tseries.frequencies import to_offset + from pandas._libs.tslibs.offsets import to_offset unit = to_offset(freq).nanos result = unit * rounder(self.value / float(unit)) return Timedelta(result, unit='ns')
https://api.github.com/repos/pandas-dev/pandas/pulls/34523
2020-06-02T02:14:35Z
2020-06-02T13:03:55Z
2020-06-02T13:03:55Z
2020-06-02T14:39:22Z
DEPR: tz kwarg in Period.to_timestamp
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 6e8cbc34be062..2230c6cb88843 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -702,6 +702,7 @@ Deprecations raise an ``IndexError`` in the future. You can manually convert to an integer key instead (:issue:`34191`). - The ``squeeze`` keyword in the ``groupby`` function is deprecated and will be removed in a future version (:issue:`32380`) +- The ``tz`` keyword in :meth:`Period.to_timestamp` is deprecated and will be removed in a future version; use `per.to_timestamp(...).tz_localize(tz)`` instead (:issue:`34522`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index bc190825214c1..e271518525008 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1,3 +1,5 @@ +import warnings + from cpython.object cimport PyObject_RichCompareBool, Py_EQ, Py_NE from numpy cimport int64_t, import_array, ndarray @@ -1724,6 +1726,16 @@ cdef class _Period: ------- Timestamp """ + if tz is not None: + # GH#34522 + warnings.warn( + "Period.to_timestamp `tz` argument is deprecated and will " + "be removed in a future version. 
Use " + "`per.to_timestamp(...).tz_localize(tz)` instead.", + FutureWarning, + stacklevel=1, + ) + how = validate_end_alias(how) end = how == 'E' diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index 631760c547985..fc7fd037bebdd 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -221,7 +221,7 @@ def _use_dynamic_x(ax, data): x = data.index if base <= FreqGroup.FR_DAY: return x[:1].is_normalized - return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0] + return Period(x[0], freq).to_timestamp().tz_localize(x.tz) == x[0] return True diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 41909b4b1a9bb..42bd20fd9640b 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -506,7 +506,9 @@ def test_hash(self): @pytest.mark.parametrize("tzstr", ["Europe/Brussels", "Asia/Tokyo", "US/Pacific"]) def test_to_timestamp_tz_arg(self, tzstr): - p = Period("1/1/2005", freq="M").to_timestamp(tz=tzstr) + # GH#34522 tz kwarg deprecated + with tm.assert_produces_warning(FutureWarning): + p = Period("1/1/2005", freq="M").to_timestamp(tz=tzstr) exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr) exp_zone = pytz.timezone(tzstr).normalize(p) @@ -514,7 +516,8 @@ def test_to_timestamp_tz_arg(self, tzstr): assert p.tz == exp_zone.tzinfo assert p.tz == exp.tz - p = Period("1/1/2005", freq="3H").to_timestamp(tz=tzstr) + with tm.assert_produces_warning(FutureWarning): + p = Period("1/1/2005", freq="3H").to_timestamp(tz=tzstr) exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr) exp_zone = pytz.timezone(tzstr).normalize(p) @@ -522,7 +525,8 @@ def test_to_timestamp_tz_arg(self, tzstr): assert p.tz == exp_zone.tzinfo assert p.tz == exp.tz - p = Period("1/1/2005", freq="A").to_timestamp(freq="A", tz=tzstr) + with tm.assert_produces_warning(FutureWarning): + p = Period("1/1/2005", 
freq="A").to_timestamp(freq="A", tz=tzstr) exp = Timestamp("31/12/2005", tz="UTC").tz_convert(tzstr) exp_zone = pytz.timezone(tzstr).normalize(p) @@ -530,7 +534,8 @@ def test_to_timestamp_tz_arg(self, tzstr): assert p.tz == exp_zone.tzinfo assert p.tz == exp.tz - p = Period("1/1/2005", freq="A").to_timestamp(freq="3H", tz=tzstr) + with tm.assert_produces_warning(FutureWarning): + p = Period("1/1/2005", freq="A").to_timestamp(freq="3H", tz=tzstr) exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr) exp_zone = pytz.timezone(tzstr).normalize(p) @@ -544,20 +549,23 @@ def test_to_timestamp_tz_arg(self, tzstr): ) def test_to_timestamp_tz_arg_dateutil(self, tzstr): tz = maybe_get_tz(tzstr) - p = Period("1/1/2005", freq="M").to_timestamp(tz=tz) + with tm.assert_produces_warning(FutureWarning): + p = Period("1/1/2005", freq="M").to_timestamp(tz=tz) exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr) assert p == exp assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1]) assert p.tz == exp.tz - p = Period("1/1/2005", freq="M").to_timestamp(freq="3H", tz=tz) + with tm.assert_produces_warning(FutureWarning): + p = Period("1/1/2005", freq="M").to_timestamp(freq="3H", tz=tz) exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr) assert p == exp assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1]) assert p.tz == exp.tz def test_to_timestamp_tz_arg_dateutil_from_string(self): - p = Period("1/1/2005", freq="M").to_timestamp(tz="dateutil/Europe/Brussels") + with tm.assert_produces_warning(FutureWarning): + p = Period("1/1/2005", freq="M").to_timestamp(tz="dateutil/Europe/Brussels") assert p.tz == dateutil_gettz("Europe/Brussels") def test_to_timestamp_mult(self):
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry inconsistent with PeriodArray/PeriodIndex methods
https://api.github.com/repos/pandas-dev/pandas/pulls/34522
2020-06-01T23:20:39Z
2020-06-03T11:34:03Z
2020-06-03T11:34:03Z
2020-06-03T16:29:07Z
REF: add Resolution to tslibs.__init__
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 370b49f2c4fa3..965adc82df676 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -8,6 +8,7 @@ "OutOfBoundsDatetime", "IncompatibleFrequency", "Period", + "Resolution", "Timedelta", "delta_to_nanoseconds", "ints_to_pytimedelta", @@ -20,6 +21,7 @@ from .nattype import NaT, NaTType, iNaT, is_null_datetimelike, nat_strings from .np_datetime import OutOfBoundsDatetime from .period import IncompatibleFrequency, Period +from .resolution import Resolution from .timedeltas import Timedelta, delta_to_nanoseconds, ints_to_pytimedelta from .timestamps import Timestamp from .tzconversion import tz_convert_single diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index b9f712e4d64fe..55420181190aa 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -5,9 +5,16 @@ import numpy as np -from pandas._libs import NaT, NaTType, Period, Timestamp, algos, iNaT, lib -from pandas._libs.tslibs.resolution import Resolution -from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds +from pandas._libs import algos, lib +from pandas._libs.tslibs import ( + NaT, + NaTType, + Period, + Resolution, + Timestamp, + delta_to_nanoseconds, + iNaT, +) from pandas._libs.tslibs.timestamps import ( RoundTo, integer_op_not_supported, @@ -1103,7 +1110,7 @@ def inferred_freq(self): return None @property # NB: override with cache_readonly in immutable subclasses - def _resolution(self) -> Optional[Resolution]: + def _resolution_obj(self) -> Optional[Resolution]: try: return Resolution.get_reso_from_freq(self.freqstr) except KeyError: @@ -1114,12 +1121,12 @@ def resolution(self) -> str: """ Returns day, hour, minute, second, millisecond or microsecond """ - if self._resolution is None: + if self._resolution_obj is None: if is_period_dtype(self.dtype): # somewhere in the past it was decided we default to day return 
"day" # otherwise we fall through and will raise - return Resolution.get_str(self._resolution) + return Resolution.get_str(self._resolution_obj) @classmethod def _validate_frequency(cls, index, freq, **kwargs): diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index bade1c031d556..f11f3ad974b37 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -537,7 +537,7 @@ def is_normalized(self): return conversion.is_date_array_normalized(self.asi8, self.tz) @property # NB: override with cache_readonly in immutable subclasses - def _resolution(self) -> libresolution.Resolution: + def _resolution_obj(self) -> libresolution.Resolution: return libresolution.get_resolution(self.asi8, self.tz) # ---------------------------------------------------------------- diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 2a7cd0eac04a6..21f4b3f8bb76a 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -7,7 +7,7 @@ import numpy as np from pandas._libs import NaT, Timedelta, iNaT, join as libjoin, lib -from pandas._libs.tslibs import timezones +from pandas._libs.tslibs import Resolution, timezones from pandas._libs.tslibs.parsing import DateParseError from pandas._typing import Label from pandas.compat.numpy import function as nv @@ -78,7 +78,7 @@ def wrapper(left, right): @inherit_names( - ["inferred_freq", "_isnan", "_resolution", "resolution"], + ["inferred_freq", "_isnan", "_resolution_obj", "resolution"], DatetimeLikeArrayMixin, cache=True, ) @@ -93,7 +93,7 @@ class DatetimeIndexOpsMixin(ExtensionIndex): _data: Union[DatetimeArray, TimedeltaArray, PeriodArray] freq: Optional[DateOffset] freqstr: Optional[str] - _resolution: int + _resolution_obj: Resolution _bool_ops: List[str] = [] _field_ops: List[str] = [] diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 250038726b719..4677faa6b7d24 100644 --- 
a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -6,7 +6,7 @@ import numpy as np from pandas._libs import NaT, Period, Timestamp, index as libindex, lib, tslib -from pandas._libs.tslibs import fields, parsing, resolution as libresolution, timezones +from pandas._libs.tslibs import Resolution, fields, parsing, timezones from pandas._libs.tslibs.frequencies import get_freq_group from pandas._libs.tslibs.offsets import prefix_mapping from pandas._typing import DtypeObj, Label @@ -72,7 +72,9 @@ def _new_DatetimeIndex(cls, d): DatetimeArray, wrap=True, ) -@inherit_names(["_timezone", "is_normalized", "_resolution"], DatetimeArray, cache=True) +@inherit_names( + ["_timezone", "is_normalized", "_resolution_obj"], DatetimeArray, cache=True +) @inherit_names( [ "_bool_ops", @@ -525,7 +527,7 @@ def _validate_partial_date_slice(self, reso: str): if ( self.is_monotonic and reso in ["day", "hour", "minute", "second"] - and self._resolution >= libresolution.Resolution.from_attrname(reso) + and self._resolution_obj >= Resolution.from_attrname(reso) ): # These resolution/monotonicity validations came from GH3931, # GH3452 and GH2369. diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index 5518760dbacb3..908f9fa699891 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py @@ -33,6 +33,7 @@ def test_namespace(): "OutOfBoundsDatetime", "Period", "IncompatibleFrequency", + "Resolution", "Timedelta", "Timestamp", "delta_to_nanoseconds",
rename _resolution -> _resolution_obj
https://api.github.com/repos/pandas-dev/pandas/pulls/34518
2020-06-01T20:15:58Z
2020-06-02T01:13:39Z
2020-06-02T01:13:39Z
2020-06-02T01:21:46Z
TST #22663: Operation between DataFrame with non-numeric types and incomplete series
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index e7b7f3e524d44..d9f251a1b5304 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1536,3 +1536,18 @@ def test_dataframe_blockwise_slicelike(): expected = pd.DataFrame({i: left[i] + right[i] for i in left.columns}) tm.assert_frame_equal(res, expected) + + +@pytest.mark.parametrize( + "df, col_dtype", + [ + (pd.DataFrame([[1.0, 2.0], [4.0, 5.0]], columns=list("ab")), "float64"), + (pd.DataFrame([[1.0, "b"], [4.0, "b"]], columns=list("ab")), "object"), + ], +) +def test_dataframe_operation_with_non_numeric_types(df, col_dtype): + # GH #22663 + expected = pd.DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab")) + expected = expected.astype({"b": col_dtype}) + result = df + pd.Series([-1.0], index=list("a")) + tm.assert_frame_equal(result, expected)
- [ x ] closes #22663 - [ 1 ] tests added / passed - [ x ] passes `black pandas` - [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/34517
2020-06-01T16:05:43Z
2020-06-02T22:21:05Z
2020-06-02T22:21:04Z
2020-06-03T06:48:29Z
CI: Linux py37_np_dev failing on 1.0.x
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index eecb1e567bd8f..d4b60ec186b86 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -558,7 +558,7 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False): object """ - dtype = np.object_ + dtype = np.dtype(np.object_) # a 1-element ndarray if isinstance(val, np.ndarray): @@ -577,7 +577,7 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False): # instead of np.empty (but then you still don't want things # coming out as np.str_! - dtype = np.object_ + dtype = np.dtype(np.object_) elif isinstance(val, (np.datetime64, datetime)): val = tslibs.Timestamp(val) @@ -588,7 +588,7 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False): dtype = DatetimeTZDtype(unit="ns", tz=val.tz) else: # return datetimetz as object - return np.object_, val + return np.dtype(np.object_), val val = val.value elif isinstance(val, (np.timedelta64, timedelta)): @@ -596,22 +596,22 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False): dtype = np.dtype("m8[ns]") elif is_bool(val): - dtype = np.bool_ + dtype = np.dtype(np.bool_) elif is_integer(val): if isinstance(val, np.integer): - dtype = type(val) + dtype = np.dtype(type(val)) else: - dtype = np.int64 + dtype = np.dtype(np.int64) elif is_float(val): if isinstance(val, np.floating): - dtype = type(val) + dtype = np.dtype(type(val)) else: - dtype = np.float64 + dtype = np.dtype(np.float64) elif is_complex(val): - dtype = np.complex_ + dtype = np.dtype(np.complex_) elif pandas_dtype: if lib.is_period(val): diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 3a92cfd9bf16d..0856f65306849 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -233,7 +233,7 @@ def init_dict(data, index, columns, dtype=None): if missing.any() and not is_integer_dtype(dtype): if dtype is None or np.issubdtype(dtype, np.flexible): # GH#1783 - nan_dtype = 
object + nan_dtype = np.dtype(object) else: nan_dtype = dtype val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
xref https://github.com/pandas-dev/pandas/pull/34503#issuecomment-636651219
https://api.github.com/repos/pandas-dev/pandas/pulls/34516
2020-06-01T15:14:54Z
2020-06-01T20:14:57Z
2020-06-01T20:14:57Z
2020-06-02T07:22:02Z
CLN: remove Resolution.get_stride_from_decimal
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 0caacd81c53f5..9e6a2c0507f7f 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -762,7 +762,7 @@ cdef class Tick(SingleConstructorOffset): return Micro(self.n * 1000) if type(self) is Micro: return Nano(self.n * 1000) - raise NotImplementedError(type(self)) + raise ValueError("Could not convert to integer offset at any resolution") # -------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index b3fc1e32f68e8..91059638cd73a 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -27,16 +27,6 @@ cdef: int RESO_HR = 5 int RESO_DAY = 6 -reso_str_bump_map = { - "D": "H", - "H": "T", - "T": "S", - "S": "L", - "L": "U", - "U": "N", - "N": None, -} - _abbrev_to_attrnames = {v: k for k, v in attrname_to_abbrevs.items()} _reso_str_map = { @@ -168,19 +158,19 @@ class Resolution(Enum): return _reso_str_map[reso.value] @classmethod - def get_reso(cls, resostr: str) -> "Resolution": + def from_attrname(cls, attrname: str) -> "Resolution": """ Return resolution str against resolution code. 
Examples -------- - >>> Resolution.get_reso('second') + >>> Resolution.from_attrname('second') 2 - >>> Resolution.get_reso('second') == Resolution.RESO_SEC + >>> Resolution.from_attrname('second') == Resolution.RESO_SEC True """ - return cls(_str_reso_map[resostr]) + return cls(_str_reso_map[attrname]) @classmethod def get_attrname_from_abbrev(cls, freq: str) -> str: @@ -209,47 +199,7 @@ class Resolution(Enum): >>> Resolution.get_reso_from_freq('H') == Resolution.RESO_HR True """ - return cls.get_reso(cls.get_attrname_from_abbrev(freq)) - - @classmethod - def get_stride_from_decimal(cls, value: float, freq: str): - """ - Convert freq with decimal stride into a higher freq with integer stride - - Parameters - ---------- - value : float - freq : str - Frequency string - - Raises - ------ - ValueError - If the float cannot be converted to an integer at any resolution. - - Examples - -------- - >>> Resolution.get_stride_from_decimal(1.5, 'T') - (90, 'S') - - >>> Resolution.get_stride_from_decimal(1.04, 'H') - (3744, 'S') - - >>> Resolution.get_stride_from_decimal(1, 'D') - (1, 'D') - """ - if np.isclose(value % 1, 0): - return int(value), freq - else: - start_reso = cls.get_reso_from_freq(freq) - if start_reso.value == 0: - raise ValueError( - "Could not convert to integer offset at any resolution" - ) - - next_value = _reso_mult_map[start_reso.value] * value - next_name = reso_str_bump_map[freq] - return cls.get_stride_from_decimal(next_value, next_name) + return cls.from_attrname(cls.get_attrname_from_abbrev(freq)) # ---------------------------------------------------------------------- diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 877d19ef68558..250038726b719 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -525,7 +525,7 @@ def _validate_partial_date_slice(self, reso: str): if ( self.is_monotonic and reso in ["day", "hour", "minute", "second"] - and self._resolution >= 
libresolution.Resolution.get_reso(reso) + and self._resolution >= libresolution.Resolution.from_attrname(reso) ): # These resolution/monotonicity validations came from GH3931, # GH3452 and GH2369. diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py index 51554854378ea..133db0c3d611b 100644 --- a/pandas/tests/tseries/frequencies/test_freq_code.py +++ b/pandas/tests/tseries/frequencies/test_freq_code.py @@ -10,6 +10,7 @@ ) from pandas._libs.tslibs.resolution import Resolution as _reso +from pandas.tseries.frequencies import to_offset import pandas.tseries.offsets as offsets @@ -116,7 +117,8 @@ def test_get_freq_roundtrip(freq): @pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U"]) def test_get_freq_roundtrip2(freq): - result = _attrname_to_abbrevs[_reso.get_str(_reso.get_reso_from_freq(freq))] + obj = _reso.get_reso_from_freq(freq) + result = _attrname_to_abbrevs[_reso.get_str(obj)] assert freq == result @@ -133,7 +135,9 @@ def test_get_freq_roundtrip2(freq): ) def test_resolution_bumping(args, expected): # see gh-14378 - assert _reso.get_stride_from_decimal(*args) == expected + off = to_offset(str(args[0]) + args[1]) + assert off.n == expected[0] + assert off._prefix == expected[1] @pytest.mark.parametrize( @@ -145,10 +149,10 @@ def test_resolution_bumping(args, expected): ], ) def test_cat(args): - msg = "Could not convert to integer offset at any resolution" + msg = "Invalid frequency" with pytest.raises(ValueError, match=msg): - _reso.get_stride_from_decimal(*args) + to_offset(str(args[0]) + args[1]) @pytest.mark.parametrize(
as it is no longer used; update its tests rename Resolution.get_reso -> Resolution.from_attrname
https://api.github.com/repos/pandas-dev/pandas/pulls/34515
2020-06-01T15:11:32Z
2020-06-01T21:55:36Z
2020-06-01T21:55:36Z
2020-06-01T22:57:15Z
BUG: asymmetric error bars for series (GH9536)
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index 27826e7cde9e1..5bc87bca87211 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -1425,7 +1425,7 @@ Horizontal and vertical error bars can be supplied to the ``xerr`` and ``yerr`` * As a ``str`` indicating which of the columns of plotting :class:`DataFrame` contain the error values. * As raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting :class:`DataFrame`/:class:`Series`. -Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``M`` length :class:`Series`, a ``Mx2`` array should be provided indicating lower and upper (or left and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors should be in a ``Mx2xN`` array. +Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``N`` length :class:`Series`, a ``2xN`` array should be provided indicating lower and upper (or left and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors should be in a ``Mx2xN`` array. Here is an example of one way to easily plot group means with standard deviations from the raw data. diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index bdb844ded59b7..d74c1bca61de8 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -340,6 +340,7 @@ Other enhancements - :class:`pandas.core.window.ExponentialMovingWindow` now supports a ``times`` argument that allows ``mean`` to be calculated with observations spaced by the timestamps in ``times`` (:issue:`34839`) - :meth:`DataFrame.agg` and :meth:`Series.agg` now accept named aggregation for renaming the output columns/indexes. 
(:issue:`26513`) - ``compute.use_numba`` now exists as a configuration option that utilizes the numba engine when available (:issue:`33966`) +- :meth:`Series.plot` now supports asymmetric error bars. Previously, if :meth:`Series.plot` received a "2xN" array with error values for `yerr` and/or `xerr`, the left/lower values (first row) were mirrored, while the right/upper values (second row) were ignored. Now, the first row represents the left/lower error values and the second row the right/upper error values. (:issue:`9536`) .. --------------------------------------------------------------------------- diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index e510f7140519a..353bc8a8936a5 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -770,6 +770,12 @@ def _parse_errorbars(self, label, err): DataFrame/dict: error values are paired with keys matching the key in the plotted DataFrame str: the name of the column within the plotted DataFrame + + Asymmetrical error bars are also supported, however raw error values + must be provided in this case. For a ``N`` length :class:`Series`, a + ``2xN`` array should be provided indicating lower and upper (or left + and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors + should be in a ``Mx2xN`` array. 
""" if err is None: return None @@ -810,7 +816,15 @@ def match_labels(data, e): err_shape = err.shape # asymmetrical error bars - if err.ndim == 3: + if isinstance(self.data, ABCSeries) and err_shape[0] == 2: + err = np.expand_dims(err, 0) + err_shape = err.shape + if err_shape[2] != len(self.data): + raise ValueError( + "Asymmetrical error bars should be provided " + f"with the shape (2, {len(self.data)})" + ) + elif isinstance(self.data, ABCDataFrame) and err.ndim == 3: if ( (err_shape[0] != self.nseries) or (err_shape[1] != 2) diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 64da98f57676f..316ca6ce91af7 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -729,6 +729,26 @@ def test_dup_datetime_index_plot(self): s = Series(values, index=index) _check_plot_works(s.plot) + def test_errorbar_asymmetrical(self): + # GH9536 + s = Series(np.arange(10), name="x") + err = np.random.rand(2, 10) + + ax = s.plot(yerr=err, xerr=err) + + result = np.vstack([i.vertices[:, 1] for i in ax.collections[1].get_paths()]) + expected = (err.T * np.array([-1, 1])) + s.to_numpy().reshape(-1, 1) + tm.assert_numpy_array_equal(result, expected) + + msg = ( + "Asymmetrical error bars should be provided " + f"with the shape \\(2, {len(s)}\\)" + ) + with pytest.raises(ValueError, match=msg): + s.plot(yerr=np.random.rand(2, 11)) + + tm.close() + @pytest.mark.slow def test_errorbar_plot(self):
closes #9536 This fix enables asymmetric errors bars for pd.Series plots. In the current implementation the bars are symmetric, even if two sets of errors are provided. Took inspirations from #12046
https://api.github.com/repos/pandas-dev/pandas/pulls/34514
2020-06-01T14:46:29Z
2020-07-17T16:26:47Z
2020-07-17T16:26:47Z
2020-07-17T16:46:15Z
Update : In DataFrame Class, Columns should not contain duplicate val…
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index dfc938918f03a..ce47314ad4328 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -357,6 +357,8 @@ class DataFrame(NDFrame): columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided. + Columns Labels should not contain duplicate values, if column + labels are provided. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool, default False @@ -442,6 +444,11 @@ def __init__( data = {} if dtype is not None: dtype = self._validate_dtype(dtype) + + #We need columns to have unique labels. + if columns != None: + if len(columns) != len(set(columns)): + raise Exception("Columns should not contain duplicate values.") if isinstance(data, DataFrame): data = data._mgr
Also updated the Docstring.(#12991)
https://api.github.com/repos/pandas-dev/pandas/pulls/34512
2020-06-01T13:52:48Z
2020-07-05T07:53:10Z
null
2020-07-05T07:53:10Z
BUG: fix BooleanArray.astype('string') (GH34110)
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index 6b7b282bfd940..5d791ffd20f01 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -370,11 +370,15 @@ def astype(self, dtype, copy: bool = True) -> ArrayLike: if incompatible type with an BooleanDtype, equivalent of same_kind casting """ + from pandas.core.arrays.string_ import StringDtype + dtype = pandas_dtype(dtype) if isinstance(dtype, BooleanDtype): values, mask = coerce_to_array(self, copy=copy) return BooleanArray(values, mask, copy=False) + elif isinstance(dtype, StringDtype): + return dtype.construct_array_type()._from_sequence(self, copy=False) if is_bool_dtype(dtype): # astype_nansafe converts np.nan to True
This should fix CI (bug caused by https://github.com/pandas-dev/pandas/pull/34110, due to another PR that was merged a few days ago adding this capability)
https://api.github.com/repos/pandas-dev/pandas/pulls/34509
2020-06-01T07:27:09Z
2020-06-01T08:07:26Z
2020-06-01T08:07:26Z
2020-06-01T08:29:49Z
DOC: Docstring updated for DataFrame.equals
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 843b602a12823..87f25f578c3c6 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1201,9 +1201,11 @@ def equals(self, other): This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in - the same location are considered equal. The column headers do not - need to have the same type, but the elements within the columns must - be the same dtype. + the same location are considered equal. + + The row/column index do not need to have the same type, as long + as the values are considered equal. Corresponding columns must be of + the same dtype. Parameters ---------- @@ -1232,13 +1234,6 @@ def equals(self, other): numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. - Notes - ----- - This function requires that the elements have the same dtype as their - respective elements in the other Series or DataFrame. However, the - column labels do not need to have the same type, as long as they are - still considered equal. - Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]})
- [ ] closes #34498
https://api.github.com/repos/pandas-dev/pandas/pulls/34508
2020-06-01T04:44:14Z
2020-08-07T19:16:40Z
2020-08-07T19:16:40Z
2020-08-07T19:16:54Z
REF: use standard pattern in normalize_i8_timestamps
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index cbb27bf8e9917..b0bad119d6a46 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -788,7 +788,16 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t npy_datetimestruct dts int64_t delta, local_val - if is_tzlocal(tz): + if tz is None or is_utc(tz): + with nogil: + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + local_val = stamps[i] + dt64_to_dtstruct(local_val, &dts) + result[i] = _normalized_stamp(&dts) + elif is_tzlocal(tz): for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index a7c4b44cf95f8..fad87f9f910cb 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1451,13 +1451,8 @@ default 'raise' ndarray[int64_t] normalized tzinfo own_tz = self.tzinfo # could be None - if own_tz is None or is_utc(own_tz): - DAY_NS = ccalendar.DAY_NANOS - normalized_value = self.value - (self.value % DAY_NS) - return Timestamp(normalized_value).tz_localize(own_tz) - normalized = normalize_i8_timestamps( - np.array([self.value], dtype='i8'), tz=own_tz) + np.array([self.value], dtype="i8"), tz=own_tz) return Timestamp(normalized[0]).tz_localize(own_tz) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 4e31477571a5f..bade1c031d556 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -9,15 +9,14 @@ from pandas._libs.tslibs import ( NaT, Timestamp, - ccalendar, conversion, fields, + frequencies as libfrequencies, iNaT, resolution as libresolution, timezones, tzconversion, ) -import pandas._libs.tslibs.frequencies as libfrequencies from pandas.errors import PerformanceWarning from pandas.core.dtypes.common import ( @@ -1036,14 +1035,7 @@ def normalize(self): '2014-08-01 00:00:00+05:30'], 
dtype='datetime64[ns, Asia/Calcutta]', freq=None) """ - if self.tz is None or timezones.is_utc(self.tz): - not_null = ~self.isna() - DAY_NS = ccalendar.DAY_SECONDS * 1_000_000_000 - new_values = self.asi8.copy() - adjustment = new_values[not_null] % DAY_NS - new_values[not_null] = new_values[not_null] - adjustment - else: - new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) + new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) return type(self)(new_values)._with_freq("infer").tz_localize(self.tz) def to_period(self, freq=None):
From here the plan is to move to passing `int64_t*` which will allow us to share code between the scalar/vector versions of this function
https://api.github.com/repos/pandas-dev/pandas/pulls/34507
2020-06-01T00:04:20Z
2020-06-01T02:31:19Z
2020-06-01T02:31:19Z
2020-06-09T15:00:58Z
CLN: unreachable branch in tzconversion
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 030549f2528ed..575088b8ab2cc 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -557,30 +557,25 @@ cdef int64_t[:] _tz_convert_dst( ndarray[int64_t] trans int64_t[:] deltas int64_t v - bint tz_is_local - tz_is_local = is_tzlocal(tz) + # tz is assumed _not_ to be tzlocal; that should go + # through _tz_convert_tzlocal_utc - if not tz_is_local: - # get_dst_info cannot extract offsets from tzlocal because its - # dependent on a datetime - trans, deltas, _ = get_dst_info(tz) - if not to_utc: - # We add `offset` below instead of subtracting it - deltas = -1 * np.array(deltas, dtype='i8') + trans, deltas, _ = get_dst_info(tz) + if not to_utc: + # We add `offset` below instead of subtracting it + deltas = -1 * np.array(deltas, dtype='i8') - # Previously, this search was done pointwise to try and benefit - # from getting to skip searches for iNaTs. However, it seems call - # overhead dominates the search time so doing it once in bulk - # is substantially faster (GH#24603) - pos = trans.searchsorted(values, side='right') - 1 + # Previously, this search was done pointwise to try and benefit + # from getting to skip searches for iNaTs. However, it seems call + # overhead dominates the search time so doing it once in bulk + # is substantially faster (GH#24603) + pos = trans.searchsorted(values, side='right') - 1 for i in range(n): v = values[i] if v == NPY_NAT: result[i] = v - elif tz_is_local: - result[i] = _tz_convert_tzlocal_utc(v, tz, to_utc=to_utc) else: if pos[i] < 0: raise ValueError('First time before start of DST info')
https://api.github.com/repos/pandas-dev/pandas/pulls/34505
2020-05-31T19:11:27Z
2020-05-31T21:46:24Z
2020-05-31T21:46:24Z
2020-05-31T22:34:18Z
CLN: stronger typing in tzconversion
diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd index 190205d9c3c44..7f445d7549f45 100644 --- a/pandas/_libs/tslibs/tzconversion.pxd +++ b/pandas/_libs/tslibs/tzconversion.pxd @@ -3,4 +3,4 @@ from numpy cimport int64_t cdef int64_t tz_convert_utc_to_tzlocal(int64_t utc_val, tzinfo tz, bint* fold=*) -cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2) +cpdef int64_t tz_convert_single(int64_t val, tzinfo tz1, tzinfo tz2) diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 030549f2528ed..ffd076d0608f2 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -28,7 +28,7 @@ from pandas._libs.tslibs.timezones cimport ( # TODO: cdef scalar version to call from convert_str_to_tsobject @cython.boundscheck(False) @cython.wraparound(False) -def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None, +def tz_localize_to_utc(ndarray[int64_t] vals, tzinfo tz, object ambiguous=None, object nonexistent=None): """ Localize tzinfo-naive i8 to given time zone (using pytz). 
If @@ -329,7 +329,7 @@ cdef int64_t tz_convert_utc_to_tzlocal(int64_t utc_val, tzinfo tz, bint* fold=NU return _tz_convert_tzlocal_utc(utc_val, tz, to_utc=False, fold=fold) -cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): +cpdef int64_t tz_convert_single(int64_t val, tzinfo tz1, tzinfo tz2): """ Convert the val (in i8) from timezone1 to timezone2 @@ -338,18 +338,15 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): Parameters ---------- val : int64 - tz1 : string / timezone object - tz2 : string / timezone object + tz1 : tzinfo + tz2 : tzinfo Returns ------- converted: int64 """ cdef: - int64_t[:] deltas - Py_ssize_t pos - int64_t v, offset, utc_date - npy_datetimestruct dts + int64_t utc_date int64_t arr[1] # See GH#17734 We should always be converting either from UTC or to UTC @@ -381,17 +378,15 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): return _tz_convert_dst(arr, tz2, to_utc=False)[0] -@cython.boundscheck(False) -@cython.wraparound(False) -def tz_convert(int64_t[:] vals, object tz1, object tz2): +def tz_convert(int64_t[:] vals, tzinfo tz1, tzinfo tz2): """ Convert the values (in i8) from timezone1 to timezone2 Parameters ---------- vals : int64 ndarray - tz1 : string / timezone object - tz2 : string / timezone object + tz1 : tzinfo + tz2 : tzinfo Returns ------- @@ -411,15 +406,15 @@ def tz_convert(int64_t[:] vals, object tz1, object tz2): @cython.boundscheck(False) @cython.wraparound(False) -cdef int64_t[:] _tz_convert_one_way(int64_t[:] vals, object tz, bint to_utc): +cdef int64_t[:] _tz_convert_one_way(int64_t[:] vals, tzinfo tz, bint to_utc): """ Convert the given values (in i8) either to UTC or from UTC. 
Parameters ---------- vals : int64 ndarray - tz1 : string / timezone object - to_utc : bint + tz1 : tzinfo + to_utc : bool Returns ------- @@ -430,7 +425,7 @@ cdef int64_t[:] _tz_convert_one_way(int64_t[:] vals, object tz, bint to_utc): Py_ssize_t i, n = len(vals) int64_t val - if not is_utc(get_timezone(tz)): + if not is_utc(tz): converted = np.empty(n, dtype=np.int64) if is_tzlocal(tz): for i in range(n):
https://api.github.com/repos/pandas-dev/pandas/pulls/34504
2020-05-31T18:33:03Z
2020-05-31T21:47:35Z
2020-05-31T21:47:35Z
2020-05-31T22:39:12Z
Backport PR #34481 on branch 1.0.x (DOC: start 1.0.5)
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index 0e0c7492da3be..dec0807b8aad1 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -16,6 +16,7 @@ Version 1.0 .. toctree:: :maxdepth: 2 + v1.0.5 v1.0.4 v1.0.3 v1.0.2 diff --git a/doc/source/whatsnew/v1.0.4.rst b/doc/source/whatsnew/v1.0.4.rst index 5cc1edc9ca9cd..84b7e7d45e8b7 100644 --- a/doc/source/whatsnew/v1.0.4.rst +++ b/doc/source/whatsnew/v1.0.4.rst @@ -45,4 +45,4 @@ Bug fixes Contributors ~~~~~~~~~~~~ -.. contributors:: v1.0.3..v1.0.4|HEAD +.. contributors:: v1.0.3..v1.0.4 diff --git a/doc/source/whatsnew/v1.0.5.rst b/doc/source/whatsnew/v1.0.5.rst new file mode 100644 index 0000000000000..1edc7e1cad72f --- /dev/null +++ b/doc/source/whatsnew/v1.0.5.rst @@ -0,0 +1,31 @@ + +.. _whatsnew_105: + +What's new in 1.0.5 (June XX, 2020) +----------------------------------- + +These are the changes in pandas 1.0.5. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_105.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ +- +- + +.. _whatsnew_105.bug_fixes: + +Bug fixes +~~~~~~~~~ +- +- + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.0.4..v1.0.5|HEAD
Backport PR #34481: DOC: start 1.0.5
https://api.github.com/repos/pandas-dev/pandas/pulls/34503
2020-05-31T18:05:26Z
2020-06-01T07:00:22Z
2020-06-01T07:00:22Z
2020-06-01T14:25:15Z
CLN: GH29547 format with f-strings
diff --git a/pandas/tests/series/indexing/test_take.py b/pandas/tests/series/indexing/test_take.py index 9368d49e5ff2b..dc161b6be5d66 100644 --- a/pandas/tests/series/indexing/test_take.py +++ b/pandas/tests/series/indexing/test_take.py @@ -16,10 +16,10 @@ def test_take(): expected = Series([4, 2, 4], index=[4, 3, 4]) tm.assert_series_equal(actual, expected) - msg = "index {} is out of bounds for( axis 0 with)? size 5" - with pytest.raises(IndexError, match=msg.format(10)): + msg = lambda x: f"index {x} is out of bounds for( axis 0 with)? size 5" + with pytest.raises(IndexError, match=msg(10)): ser.take([1, 10]) - with pytest.raises(IndexError, match=msg.format(5)): + with pytest.raises(IndexError, match=msg(5)): ser.take([2, 5]) diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py index 3f85abb4b2817..c4a2cb90f7090 100644 --- a/pandas/tests/series/indexing/test_where.py +++ b/pandas/tests/series/indexing/test_where.py @@ -222,12 +222,14 @@ def test_where_setitem_invalid(): # GH 2702 # make sure correct exceptions are raised on invalid list assignment - msg = "cannot set using a {} indexer with a different length than the value" - + msg = ( + lambda x: f"cannot set using a {x} indexer with a " + "different length than the value" + ) # slice s = Series(list("abc")) - with pytest.raises(ValueError, match=msg.format("slice")): + with pytest.raises(ValueError, match=msg("slice")): s[0:3] = list(range(27)) s[0:3] = list(range(3)) @@ -237,7 +239,7 @@ def test_where_setitem_invalid(): # slice with step s = Series(list("abcdef")) - with pytest.raises(ValueError, match=msg.format("slice")): + with pytest.raises(ValueError, match=msg("slice")): s[0:4:2] = list(range(27)) s = Series(list("abcdef")) @@ -248,7 +250,7 @@ def test_where_setitem_invalid(): # neg slices s = Series(list("abcdef")) - with pytest.raises(ValueError, match=msg.format("slice")): + with pytest.raises(ValueError, match=msg("slice")): s[:-1] = list(range(27)) 
s[-3:-1] = list(range(2)) @@ -258,12 +260,12 @@ def test_where_setitem_invalid(): # list s = Series(list("abc")) - with pytest.raises(ValueError, match=msg.format("list-like")): + with pytest.raises(ValueError, match=msg("list-like")): s[[0, 1, 2]] = list(range(27)) s = Series(list("abc")) - with pytest.raises(ValueError, match=msg.format("list-like")): + with pytest.raises(ValueError, match=msg("list-like")): s[[0, 1, 2]] = list(range(2)) # scalar
replace .format() for f-strings in the following: 1. pandas/tests/series/indexing/test_numeric.py 2. pandas/tests/series/indexing/test_take.py 3. pandas/tests/series/indexing/test_where.py
https://api.github.com/repos/pandas-dev/pandas/pulls/34502
2020-05-31T17:52:39Z
2020-06-20T19:59:22Z
2020-06-20T19:59:22Z
2020-06-20T19:59:26Z
REG: Fix read_parquet from file-like objects
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index cde7a98eb42ae..de9a14c82b3cb 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -122,11 +122,20 @@ def write( file_obj_or_path.close() def read(self, path, columns=None, **kwargs): - parquet_ds = self.api.parquet.ParquetDataset( - path, filesystem=get_fs_for_path(path), **kwargs - ) - kwargs["columns"] = columns - result = parquet_ds.read_pandas(**kwargs).to_pandas() + fs = get_fs_for_path(path) + should_close = None + # Avoid calling get_filepath_or_buffer for s3/gcs URLs since + # since it returns an S3File which doesn't support dir reads in arrow + if not fs: + path, _, _, should_close = get_filepath_or_buffer(path) + + kwargs["use_pandas_metadata"] = True + result = self.api.parquet.read_table( + path, columns=columns, filesystem=fs, **kwargs + ).to_pandas() + if should_close: + path.close() + return result diff --git a/pandas/tests/io/data/parquet/simple.parquet b/pandas/tests/io/data/parquet/simple.parquet new file mode 100644 index 0000000000000..2862a91f508ea Binary files /dev/null and b/pandas/tests/io/data/parquet/simple.parquet differ diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 8a43d4079159b..7ee551194bf76 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -1,6 +1,7 @@ """ test parquet compat """ import datetime from distutils.version import LooseVersion +from io import BytesIO import os from warnings import catch_warnings @@ -567,6 +568,23 @@ def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col): repeat=1, ) + @tm.network + @td.skip_if_no("pyarrow") + def test_parquet_read_from_url(self, df_compat): + url = ( + "https://raw.githubusercontent.com/pandas-dev/pandas/" + "master/pandas/tests/io/data/parquet/simple.parquet" + ) + df = pd.read_parquet(url) + tm.assert_frame_equal(df, df_compat) + + @td.skip_if_no("pyarrow") + def test_read_file_like_obj_support(self, df_compat): + 
buffer = BytesIO() + df_compat.to_parquet(buffer) + df_from_buf = pd.read_parquet(buffer) + tm.assert_frame_equal(df_compat, df_from_buf) + def test_partition_cols_supported(self, pa, df_full): # GH #23283 partition_cols = ["bool", "int"]
- [x] xref #34467 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry - waiting on https://github.com/pandas-dev/pandas/pull/34481 Use arrow parquet.read_table opposed to ParquetDataset
https://api.github.com/repos/pandas-dev/pandas/pulls/34500
2020-05-31T16:58:23Z
2020-06-12T18:17:07Z
2020-06-12T18:17:07Z
2020-06-12T22:33:50Z
REF/PERF: PeriodDtype decouple from DateOffset
diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd new file mode 100644 index 0000000000000..23c473726e5a9 --- /dev/null +++ b/pandas/_libs/tslibs/dtypes.pxd @@ -0,0 +1,56 @@ + +cdef enum PeriodDtypeCode: + # Annual freqs with various fiscal year ends. + # eg, 2005 for A_FEB runs Mar 1, 2004 to Feb 28, 2005 + A = 1000 # Default alias + A_DEC = 1000 # Annual - December year end + A_JAN = 1001 # Annual - January year end + A_FEB = 1002 # Annual - February year end + A_MAR = 1003 # Annual - March year end + A_APR = 1004 # Annual - April year end + A_MAY = 1005 # Annual - May year end + A_JUN = 1006 # Annual - June year end + A_JUL = 1007 # Annual - July year end + A_AUG = 1008 # Annual - August year end + A_SEP = 1009 # Annual - September year end + A_OCT = 1010 # Annual - October year end + A_NOV = 1011 # Annual - November year end + + # Quarterly frequencies with various fiscal year ends. + # eg, Q42005 for Q_OCT runs Aug 1, 2005 to Oct 31, 2005 + Q_DEC = 2000 # Quarterly - December year end + Q_JAN = 2001 # Quarterly - January year end + Q_FEB = 2002 # Quarterly - February year end + Q_MAR = 2003 # Quarterly - March year end + Q_APR = 2004 # Quarterly - April year end + Q_MAY = 2005 # Quarterly - May year end + Q_JUN = 2006 # Quarterly - June year end + Q_JUL = 2007 # Quarterly - July year end + Q_AUG = 2008 # Quarterly - August year end + Q_SEP = 2009 # Quarterly - September year end + Q_OCT = 2010 # Quarterly - October year end + Q_NOV = 2011 # Quarterly - November year end + + M = 3000 # Monthly + + W_SUN = 4000 # Weekly - Sunday end of week + W_MON = 4001 # Weekly - Monday end of week + W_TUE = 4002 # Weekly - Tuesday end of week + W_WED = 4003 # Weekly - Wednesday end of week + W_THU = 4004 # Weekly - Thursday end of week + W_FRI = 4005 # Weekly - Friday end of week + W_SAT = 4006 # Weekly - Saturday end of week + + B = 5000 # Business days + D = 6000 # Daily + H = 7000 # Hourly + T = 8000 # Minutely + S = 9000 # Secondly + L = 10000 
# Millisecondly + U = 11000 # Microsecondly + N = 12000 # Nanosecondly + + +cdef class PeriodPseudoDtype: + cdef readonly: + PeriodDtypeCode dtype_code diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx new file mode 100644 index 0000000000000..d0d4e579a456b --- /dev/null +++ b/pandas/_libs/tslibs/dtypes.pyx @@ -0,0 +1,108 @@ +# period frequency constants corresponding to scikits timeseries +# originals + + +cdef class PeriodPseudoDtype: + """ + Similar to an actual dtype, this contains all of the information + describing a PeriodDtype in an integer code. + """ + # cdef readonly: + # PeriodDtypeCode dtype_code + + def __cinit__(self, PeriodDtypeCode code): + self.dtype_code = code + + def __eq__(self, other): + if not isinstance(other, PeriodPseudoDtype): + return False + if not isinstance(self, PeriodPseudoDtype): + # cython semantics, this is a reversed op + return False + return self.dtype_code == other.dtype_code + + @property + def date_offset(self): + """ + Corresponding DateOffset object. + + This mapping is mainly for backward-compatibility. + """ + from .offsets import to_offset + + freqstr = _reverse_period_code_map.get(self.dtype_code) + # equiv: freqstr = libfrequencies.get_freq_str(self.dtype_code) + + return to_offset(freqstr) + + @classmethod + def from_date_offset(cls, offset): + code = offset._period_dtype_code + return cls(code) + + +_period_code_map = { + # Annual freqs with various fiscal year ends. 
+ # eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005 + "A-DEC": 1000, # Annual - December year end + "A-JAN": 1001, # Annual - January year end + "A-FEB": 1002, # Annual - February year end + "A-MAR": 1003, # Annual - March year end + "A-APR": 1004, # Annual - April year end + "A-MAY": 1005, # Annual - May year end + "A-JUN": 1006, # Annual - June year end + "A-JUL": 1007, # Annual - July year end + "A-AUG": 1008, # Annual - August year end + "A-SEP": 1009, # Annual - September year end + "A-OCT": 1010, # Annual - October year end + "A-NOV": 1011, # Annual - November year end + + # Quarterly frequencies with various fiscal year ends. + # eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005 + "Q-DEC": 2000, # Quarterly - December year end + "Q-JAN": 2001, # Quarterly - January year end + "Q-FEB": 2002, # Quarterly - February year end + "Q-MAR": 2003, # Quarterly - March year end + "Q-APR": 2004, # Quarterly - April year end + "Q-MAY": 2005, # Quarterly - May year end + "Q-JUN": 2006, # Quarterly - June year end + "Q-JUL": 2007, # Quarterly - July year end + "Q-AUG": 2008, # Quarterly - August year end + "Q-SEP": 2009, # Quarterly - September year end + "Q-OCT": 2010, # Quarterly - October year end + "Q-NOV": 2011, # Quarterly - November year end + + "M": 3000, # Monthly + + "W-SUN": 4000, # Weekly - Sunday end of week + "W-MON": 4001, # Weekly - Monday end of week + "W-TUE": 4002, # Weekly - Tuesday end of week + "W-WED": 4003, # Weekly - Wednesday end of week + "W-THU": 4004, # Weekly - Thursday end of week + "W-FRI": 4005, # Weekly - Friday end of week + "W-SAT": 4006, # Weekly - Saturday end of week + + "B": 5000, # Business days + "D": 6000, # Daily + "H": 7000, # Hourly + "T": 8000, # Minutely + "S": 9000, # Secondly + "L": 10000, # Millisecondly + "U": 11000, # Microsecondly + "N": 12000, # Nanosecondly +} + +_reverse_period_code_map = { + _period_code_map[key]: key for key in _period_code_map} + +# Yearly aliases; careful not to put these in 
_reverse_period_code_map +_period_code_map.update({"Y" + key[1:]: _period_code_map[key] + for key in _period_code_map + if key.startswith("A-")}) + +_period_code_map.update({ + "Q": 2000, # Quarterly - December year end (default quarterly) + "A": 1000, # Annual + "W": 4000, # Weekly + "C": 5000, # Custom Business Day +}) diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx index 8246e24319dbd..8ca442de59f9f 100644 --- a/pandas/_libs/tslibs/frequencies.pyx +++ b/pandas/_libs/tslibs/frequencies.pyx @@ -12,6 +12,8 @@ from pandas._libs.tslibs.offsets import ( opattern, ) +from .dtypes import _period_code_map, _reverse_period_code_map + # --------------------------------------------------------------------- # Period codes @@ -31,73 +33,6 @@ class FreqGroup: FR_NS = 12000 -# period frequency constants corresponding to scikits timeseries -# originals -_period_code_map = { - # Annual freqs with various fiscal year ends. - # eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005 - "A-DEC": 1000, # Annual - December year end - "A-JAN": 1001, # Annual - January year end - "A-FEB": 1002, # Annual - February year end - "A-MAR": 1003, # Annual - March year end - "A-APR": 1004, # Annual - April year end - "A-MAY": 1005, # Annual - May year end - "A-JUN": 1006, # Annual - June year end - "A-JUL": 1007, # Annual - July year end - "A-AUG": 1008, # Annual - August year end - "A-SEP": 1009, # Annual - September year end - "A-OCT": 1010, # Annual - October year end - "A-NOV": 1011, # Annual - November year end - - # Quarterly frequencies with various fiscal year ends. 
- # eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005 - "Q-DEC": 2000, # Quarterly - December year end - "Q-JAN": 2001, # Quarterly - January year end - "Q-FEB": 2002, # Quarterly - February year end - "Q-MAR": 2003, # Quarterly - March year end - "Q-APR": 2004, # Quarterly - April year end - "Q-MAY": 2005, # Quarterly - May year end - "Q-JUN": 2006, # Quarterly - June year end - "Q-JUL": 2007, # Quarterly - July year end - "Q-AUG": 2008, # Quarterly - August year end - "Q-SEP": 2009, # Quarterly - September year end - "Q-OCT": 2010, # Quarterly - October year end - "Q-NOV": 2011, # Quarterly - November year end - - "M": 3000, # Monthly - - "W-SUN": 4000, # Weekly - Sunday end of week - "W-MON": 4001, # Weekly - Monday end of week - "W-TUE": 4002, # Weekly - Tuesday end of week - "W-WED": 4003, # Weekly - Wednesday end of week - "W-THU": 4004, # Weekly - Thursday end of week - "W-FRI": 4005, # Weekly - Friday end of week - "W-SAT": 4006, # Weekly - Saturday end of week - - "B": 5000, # Business days - "D": 6000, # Daily - "H": 7000, # Hourly - "T": 8000, # Minutely - "S": 9000, # Secondly - "L": 10000, # Millisecondly - "U": 11000, # Microsecondly - "N": 12000} # Nanosecondly - - -_reverse_period_code_map = { - _period_code_map[key]: key for key in _period_code_map} - -# Yearly aliases; careful not to put these in _reverse_period_code_map -_period_code_map.update({'Y' + key[1:]: _period_code_map[key] - for key in _period_code_map - if key.startswith('A-')}) - -_period_code_map.update({ - "Q": 2000, # Quarterly - December year end (default quarterly) - "A": 1000, # Annual - "W": 4000, # Weekly - "C": 5000}) # Custom Business Day - # Map attribute-name resolutions to resolution abbreviations _attrname_to_abbrevs = { "year": "A", diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 77b60d0c22322..63dc3407b4c55 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -48,6 +48,7 @@ from 
pandas._libs.tslibs.np_datetime cimport ( from pandas._libs.tslibs.timezones cimport utc_pytz as UTC from pandas._libs.tslibs.tzconversion cimport tz_convert_single +from .dtypes cimport PeriodDtypeCode from .timedeltas cimport delta_to_nanoseconds @@ -892,36 +893,43 @@ cdef class Tick(SingleConstructorOffset): cdef class Day(Tick): _nanos_inc = 24 * 3600 * 1_000_000_000 _prefix = "D" + _period_dtype_code = PeriodDtypeCode.D cdef class Hour(Tick): _nanos_inc = 3600 * 1_000_000_000 _prefix = "H" + _period_dtype_code = PeriodDtypeCode.H cdef class Minute(Tick): _nanos_inc = 60 * 1_000_000_000 _prefix = "T" + _period_dtype_code = PeriodDtypeCode.T cdef class Second(Tick): _nanos_inc = 1_000_000_000 _prefix = "S" + _period_dtype_code = PeriodDtypeCode.S cdef class Milli(Tick): _nanos_inc = 1_000_000 _prefix = "L" + _period_dtype_code = PeriodDtypeCode.L cdef class Micro(Tick): _nanos_inc = 1000 _prefix = "U" + _period_dtype_code = PeriodDtypeCode.U cdef class Nano(Tick): _nanos_inc = 1 _prefix = "N" + _period_dtype_code = PeriodDtypeCode.N def delta_to_tick(delta: timedelta) -> Tick: @@ -1281,7 +1289,7 @@ cdef class BusinessDay(BusinessMixin): """ DateOffset subclass representing possibly n business days. 
""" - + _period_dtype_code = PeriodDtypeCode.B _prefix = "B" _attributes = tuple(["n", "normalize", "offset"]) @@ -1945,6 +1953,15 @@ cdef class YearEnd(YearOffset): _prefix = "A" _day_opt = "end" + cdef readonly: + int _period_dtype_code + + def __init__(self, n=1, normalize=False, month=None): + # Because YearEnd can be the freq for a Period, define its + # _period_dtype_code at construction for performance + YearOffset.__init__(self, n, normalize, month) + self._period_dtype_code = PeriodDtypeCode.A + self.month % 12 + cdef class YearBegin(YearOffset): """ @@ -2099,6 +2116,14 @@ cdef class QuarterEnd(QuarterOffset): _prefix = "Q" _day_opt = "end" + cdef readonly: + int _period_dtype_code + + def __init__(self, n=1, normalize=False, startingMonth=None): + # Because QuarterEnd can be the freq for a Period, define its + # _period_dtype_code at construction for performance + QuarterOffset.__init__(self, n, normalize, startingMonth) + self._period_dtype_code = PeriodDtypeCode.Q_DEC + self.startingMonth % 12 cdef class QuarterBegin(QuarterOffset): """ @@ -2148,6 +2173,7 @@ cdef class MonthEnd(MonthOffset): """ DateOffset of one month end. 
""" + _period_dtype_code = PeriodDtypeCode.M _prefix = "M" _day_opt = "end" @@ -2452,6 +2478,7 @@ cdef class Week(SingleConstructorOffset): cdef readonly: object weekday # int or None + int _period_dtype_code def __init__(self, n=1, normalize=False, weekday=None): BaseOffset.__init__(self, n, normalize) @@ -2461,6 +2488,8 @@ cdef class Week(SingleConstructorOffset): if self.weekday < 0 or self.weekday > 6: raise ValueError(f"Day must be 0<=day<=6, got {self.weekday}") + self._period_dtype_code = PeriodDtypeCode.W_SUN + (weekday + 1) % 7 + cpdef __setstate__(self, state): self.n = state.pop("n") self.normalize = state.pop("normalize") diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 14cce1c000207..e88a20bc549bd 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -55,6 +55,9 @@ from pandas._libs.tslibs.ccalendar cimport ( get_days_in_month, ) from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS + +from pandas._libs.tslibs.dtypes cimport PeriodPseudoDtype + from pandas._libs.tslibs.frequencies cimport ( attrname_to_abbrevs, get_freq_code, @@ -1514,11 +1517,16 @@ cdef class _Period: cdef readonly: int64_t ordinal + PeriodPseudoDtype _dtype BaseOffset freq def __cinit__(self, int64_t ordinal, BaseOffset freq): self.ordinal = ordinal self.freq = freq + # Note: this is more performant than PeriodDtype.from_date_offset(freq) + # because from_date_offset cannot be made a cdef method (until cython + # supported cdef classmethods) + self._dtype = PeriodPseudoDtype(freq._period_dtype_code) @classmethod def _maybe_convert_freq(cls, object freq): @@ -1662,13 +1670,13 @@ cdef class _Period: """ freq = self._maybe_convert_freq(freq) how = validate_end_alias(how) - base1, mult1 = get_freq_code(self.freq) - base2, mult2 = get_freq_code(freq) + base1 = self._dtype.dtype_code + base2, _ = get_freq_code(freq) - # mult1 can't be negative or 0 + # self.n can't be negative or 0 end = how == 'E' if end: - ordinal = 
self.ordinal + mult1 - 1 + ordinal = self.ordinal + self.freq.n - 1 else: ordinal = self.ordinal ordinal = period_asfreq(ordinal, base1, base2, end) @@ -1751,12 +1759,12 @@ cdef class _Period: return endpoint - Timedelta(1, 'ns') if freq is None: - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code freq = get_to_timestamp_base(base) else: freq = self._maybe_convert_freq(freq) - base, mult = get_freq_code(freq) + base, _ = get_freq_code(freq) val = self.asfreq(freq, how) dt64 = period_ordinal_to_dt64(val.ordinal, base) @@ -1764,12 +1772,12 @@ cdef class _Period: @property def year(self) -> int: - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return pyear(self.ordinal, base) @property def month(self) -> int: - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return pmonth(self.ordinal, base) @property @@ -1792,7 +1800,7 @@ cdef class _Period: >>> p.day 11 """ - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return pday(self.ordinal, base) @property @@ -1822,7 +1830,7 @@ cdef class _Period: >>> p.hour 0 """ - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return phour(self.ordinal, base) @property @@ -1846,7 +1854,7 @@ cdef class _Period: >>> p.minute 3 """ - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return pminute(self.ordinal, base) @property @@ -1870,12 +1878,12 @@ cdef class _Period: >>> p.second 12 """ - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return psecond(self.ordinal, base) @property def weekofyear(self) -> int: - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return pweek(self.ordinal, base) @property @@ -1956,7 +1964,7 @@ cdef class _Period: >>> per.end_time.dayofweek 2 """ - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return pweekday(self.ordinal, base) @property @@ -2044,12 +2052,12 @@ cdef class _Period: >>> period.dayofyear 1 """ - base, 
mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return pday_of_year(self.ordinal, base) @property def quarter(self) -> int: - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return pquarter(self.ordinal, base) @property @@ -2093,7 +2101,7 @@ cdef class _Period: >>> per.year 2017 """ - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return pqyear(self.ordinal, base) @property @@ -2127,7 +2135,7 @@ cdef class _Period: >>> p.days_in_month 29 """ - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return pdays_in_month(self.ordinal, base) @property @@ -2165,7 +2173,7 @@ cdef class _Period: return self.freq.freqstr def __repr__(self) -> str: - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code formatted = period_format(self.ordinal, base) return f"Period('{formatted}', '{self.freqstr}')" @@ -2173,7 +2181,7 @@ cdef class _Period: """ Return a string representation for a particular DataFrame """ - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code formatted = period_format(self.ordinal, base) value = str(formatted) return value @@ -2325,7 +2333,7 @@ cdef class _Period: >>> a.strftime('%b. %d, %Y was a %A') 'Jan. 
01, 2001 was a Monday' """ - base, mult = get_freq_code(self.freq) + base = self._dtype.dtype_code return period_format(self.ordinal, base, fmt) diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index bbabfed4cb976..b0c524a257684 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py @@ -9,6 +9,7 @@ def test_namespace(): "base", "ccalendar", "conversion", + "dtypes", "fields", "frequencies", "nattype", diff --git a/setup.py b/setup.py index 63510867f0dd7..9f411ec10cd80 100755 --- a/setup.py +++ b/setup.py @@ -308,8 +308,8 @@ class CheckSDist(sdist_class): "pandas/_libs/ops.pyx", "pandas/_libs/parsers.pyx", "pandas/_libs/tslibs/base.pyx", - "pandas/_libs/tslibs/c_timestamp.pyx", "pandas/_libs/tslibs/ccalendar.pyx", + "pandas/_libs/tslibs/dtypes.pyx", "pandas/_libs/tslibs/period.pyx", "pandas/_libs/tslibs/strptime.pyx", "pandas/_libs/tslibs/np_datetime.pyx", @@ -605,6 +605,7 @@ def srcpath(name=None, suffix=".pyx", subdir="src"): "_libs.tslib": {"pyxfile": "_libs/tslib", "depends": tseries_depends}, "_libs.tslibs.base": {"pyxfile": "_libs/tslibs/base"}, "_libs.tslibs.ccalendar": {"pyxfile": "_libs/tslibs/ccalendar"}, + "_libs.tslibs.dtypes": {"pyxfile": "_libs/tslibs/dtypes"}, "_libs.tslibs.conversion": { "pyxfile": "_libs/tslibs/conversion", "depends": tseries_depends,
ATM we define PeriodDtype in terms of DateOffsets, but this is a misnomer. In fact, virtually every Period/PeriodArray method has to start off by taking its `.freq` and finding the corresponding integer code. This makes the integer code itself into a dtype. We lose a little bit of ground on the constructor, then make it back up in subsequent calls. ``` In [2]: per = pd.Period("2016Q1") In [3]: %timeit per.year 556 ns ± 13.3 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) # <-- master 94.5 ns ± 1.36 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each) # <-- PR In [4]: %timeit pd.Period("2016Q1") 21.2 µs ± 176 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each) # <-- master 25.8 µs ± 457 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each) # <-- PR ``` The constructor perf I think we can improve by eventually cutting the DateOffset out of the process altogether. We'll also be able to de-duplicate a _bunch_ of other stuff: FreqGroup can be defined in terms of PeriodDypeCode, Resolution can be defined in terms of FreqGroup (xref #34462), we can avoid redundant definitions of the dtype codes in period.pyx, and a lot of the rest of libfrequencies becomes unnecessary. In a follow-up I plan to mix the cython-space PeriodDtype into the core.dtypes PeriodDtype and we can get the same perf improvements in the PeriodArray methods.
https://api.github.com/repos/pandas-dev/pandas/pulls/34499
2020-05-31T15:49:01Z
2020-06-04T05:51:16Z
2020-06-04T05:51:16Z
2020-06-04T16:50:19Z
BUG: Fix value setting in case of merging via index on one side and column on other side.
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 17a830788be3f..4188fb8006dad 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -936,6 +936,7 @@ Reshaping - Bug in :meth:`DataFrame.replace` casts columns to ``object`` dtype if items in ``to_replace`` not in values (:issue:`32988`) - Ensure only named functions can be used in :func:`eval()` (:issue:`32460`) - Fixed bug in :func:`melt` where melting MultiIndex columns with ``col_level`` > 0 would raise a ``KeyError`` on ``id_vars`` (:issue:`34129`) +- Fixed bug setting wrong values in result when joining one side over index and other side over column in case of join type not equal to inner (:issue:`17257`, :issue:`28220`, :issue:`28243` and :issue:`33232`) Sparse ^^^^^^ diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 0c796c8f45a52..a207869de8484 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -787,7 +787,25 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer): take_left, take_right = None, None if name in result: - + array_like = is_array_like(rname) or is_array_like(lname) + right_in, left_in = False, False + if not array_like: + right_in = ( + name in self.right_on + or self.right_index is True + and name in self.right.index.names + ) + left_in = ( + name in self.left_on + or self.left_index is True + and name in self.left.index.names + ) + if ( + (not right_in or not left_in) + and not array_like + and self.how != "asof" + ): + continue if left_indexer is not None and right_indexer is not None: if name in self.left: diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 4408aa0bbce4a..860908cd97adb 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -484,12 +484,10 @@ def check2(exp, kwarg): kwarg = dict(left_on="a", right_index=True) check1(exp_in, kwarg) - 
exp_out["a"] = [0, 1, 2] check2(exp_out, kwarg) kwarg = dict(left_on="a", right_on="x") check1(exp_in, kwarg) - exp_out["a"] = np.array([np.nan] * 3, dtype=object) check2(exp_out, kwarg) def test_merge_left_notempty_right_empty(self): @@ -1294,9 +1292,9 @@ def test_merge_on_index_with_more_values(self, how, index, expected_index): [0, 0, 0], [1, 1, 1], [2, 2, 2], - [np.nan, 3, 3], - [np.nan, 4, 4], - [np.nan, 5, 5], + [np.nan, np.nan, 3], + [np.nan, np.nan, 4], + [np.nan, np.nan, 5], ], columns=["a", "key", "b"], ) @@ -1311,7 +1309,7 @@ def test_merge_right_index_right(self): right = pd.DataFrame({"b": [1, 2, 3]}) expected = pd.DataFrame( - {"a": [1, 2, 3, None], "key": [0, 1, 1, 2], "b": [1, 2, 2, 3]}, + {"a": [1, 2, 3, None], "key": [0, 1, 1, None], "b": [1, 2, 2, 3]}, columns=["a", "key", "b"], index=[0, 1, 2, np.nan], ) @@ -1347,7 +1345,7 @@ def test_merge_take_missing_values_from_index_of_other_dtype(self): expected = pd.DataFrame( { "a": [1, 2, 3, None], - "key": pd.Categorical(["a", "a", "b", "c"]), + "key": pd.Categorical(["a", "a", "b", None], dtype=right.index.dtype), "b": [1, 1, 2, 3], }, index=[0, 1, 2, np.nan], @@ -2227,3 +2225,39 @@ def test_categorical_non_unique_monotonic(n_categories): index=left_index, ) tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize( + ("how", "data"), + [ + ("left", {"y": [1, 2], "x": ["a", np.nan]}), + ("right", {"y": [1, np.nan], "x": ["a", "c"]}), + ], +) +def test_index_true_and_on_combination_values(how, data): + # gh 28220 and gh 28243 and gh 17257 + left = pd.DataFrame({"y": [1, 2]}, index=["a", "b"]) + right = pd.DataFrame({"x": ["a", "c"]}) + + result = pd.merge(left, right, left_index=True, right_on="x", how=how) + expected = pd.DataFrame(data, index=result.index) + tm.assert_frame_equal(result, expected) + + +def test_index_true_outer_join(): + # gh 33232 and gh 17257 + left = pd.DataFrame({"lkey": [0, 3], "value": [1, 5]}) + right = pd.DataFrame({"rkey": ["foo", "baz"], "value": [0, 1]}) + + 
result = pd.merge(left, right, how="outer", left_on="lkey", right_index=True) + + expected = pd.DataFrame( + { + "lkey": [0, 3, np.nan], + "value_x": [1, 5, np.nan], + "rkey": ["foo", np.nan, "baz"], + "value_y": [0, np.nan, 1], + }, + index=[0, 1, np.nan], + ) + tm.assert_frame_equal(result, expected)
- [x] xref #15692 - [x] xref #17257 - [x] xref #28220 - [x] xref #28243 - [x] xref #33232 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Until now the `_maybe_add_join_keys` function changed the target column in the result, if the join was done over index on one side and the column on the other side. This resulted in taking values from the index and setting them for the target column, which explained the weird behavior in the issues referenced. Together with #34468 this will fix the issues. Both combined will transfer the merged index in these cases and the merged columns with the correct name. As of now this fix will only transfer the values, so the index will be wrong here. We can not skip the method completly, because this would cause issues in case of the column is in both `DataFrames` we have to run through this steps to ensure that the target values are right. So I check if this is only contained in one side.
https://api.github.com/repos/pandas-dev/pandas/pulls/34496
2020-05-31T13:52:12Z
2021-01-01T22:04:22Z
null
2021-04-26T22:00:03Z
DOC: Clarify behaviour of pd.merge
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d68cadbc75675..cb9a0eaca03bc 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -213,17 +213,17 @@ left_on : label or list, or array-like Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. - These arrays are treated as if they are columns. + These arrays are treated as if they are columns in the result DataFrame. right_on : label or list, or array-like Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. - These arrays are treated as if they are columns. + These arrays are treated as if they are columns in the result DataFrame. left_index : bool, default False Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index : bool, default False - Use the index from the right DataFrame as the join key. Same caveats as + Use the index from the right DataFrame as the join key(s). Same caveats as left_index. sort : bool, default False Sort the join keys lexicographically in the result DataFrame. If False, @@ -324,6 +324,22 @@ ... ValueError: columns overlap but no suffix specified: Index(['value'], dtype='object') + +Caveats of using left_on/right_on compared to using left_index/right_index + +>>> left = pd.DataFrame({'a': [1, 2], 'b': [1, 1], +... "l": [22, 23]}).set_index(['a', 'b']) +>>> right = pd.DataFrame({'b': [1], "r": [12]}).set_index(['b']) +>>> pd.merge(left, right, left_on=['b'], right_index=True, how="left") + l r +a b +1 1 22 12 +2 1 23 12 +>>> pd.merge(left, right, left_on=['b'], right_on=["b"], how="left") + l r +b +1 22 12 +1 23 12 """
- [x] Closes #34412 (also attempts to clarify #34273) - [x] tests added / passed (passes `scripts/validate_docstrings.py`) - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry (Not needed)
https://api.github.com/repos/pandas-dev/pandas/pulls/34495
2020-05-31T10:00:51Z
2020-07-08T16:21:40Z
null
2020-07-08T16:21:41Z
ENH: Add read_logfmt()
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index ba99aaa9f430c..7e8e39af95325 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -270,6 +270,7 @@ blosc Compression for HDF5 fastparquet 0.3.2 Parquet reading / writing gcsfs 0.2.2 Google Cloud Storage access html5lib HTML parser for read_html (see :ref:`note <optional_html>`) +logfmt 0.4.0 logfmt reading lxml 3.8.0 HTML parser for read_html (see :ref:`note <optional_html>`) matplotlib 2.2.2 Visualization numba 0.46.0 Alternative execution engine for rolling operations diff --git a/doc/source/reference/io.rst b/doc/source/reference/io.rst index 0037d4a4410c3..aeb12822ff2f3 100644 --- a/doc/source/reference/io.rst +++ b/doc/source/reference/io.rst @@ -61,6 +61,13 @@ JSON .. currentmodule:: pandas +logfmt +~~~~~~ +.. autosummary:: + :toctree: api/ + + read_logfmt + HTML ~~~~ .. autosummary:: diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index df6b44ac654ce..0b1f46e614fc3 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -21,6 +21,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like text;`CSV <https://en.wikipedia.org/wiki/Comma-separated_values>`__;:ref:`read_csv<io.read_csv_table>`;:ref:`to_csv<io.store_in_csv>` text;Fixed-Width Text File;:ref:`read_fwf<io.fwf_reader>` text;`JSON <https://www.json.org/>`__;:ref:`read_json<io.json_reader>`;:ref:`to_json<io.json_writer>` + text;`logfmt <https://www.brandur.org/logfmt>`__;:ref:`read_logfmt<io.read_logfmt>` text;`HTML <https://en.wikipedia.org/wiki/HTML>`__;:ref:`read_html<io.read_html>`;:ref:`to_html<io.html>` text; Local clipboard;:ref:`read_clipboard<io.clipboard>`;:ref:`to_clipboard<io.clipboard>` ;`MS Excel <https://en.wikipedia.org/wiki/Microsoft_Excel>`__;:ref:`read_excel<io.excel_reader>`;:ref:`to_excel<io.excel_writer>` diff --git a/doc/source/whatsnew/v1.1.0.rst 
b/doc/source/whatsnew/v1.1.0.rst index 88bf0e005a221..effc1b883444d 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -13,6 +13,24 @@ including other versions of pandas. Enhancements ~~~~~~~~~~~~ +.. _whatsnew_110.logfmt: + +logfmt_ parsing was added: + +.. _logfmt : https://www.brandur.org/logfmt + + +.. ipython:: python + :suppress: + + from io import StringIO + +.. ipython:: python + + data = "level=debug x=1\nlevel=debug x=5" + pd.read_logfmt(StringIO(data)) + + .. _whatsnew_110.astype_string: All dtypes can now be converted to ``StringDtype`` diff --git a/environment.yml b/environment.yml index b81404094fa4c..ff10f87037bac 100644 --- a/environment.yml +++ b/environment.yml @@ -97,6 +97,7 @@ dependencies: - python-snappy # required by pyarrow - pyqt>=5.9.2 # pandas.read_clipboard + - logfmt>=0.4 # pandas.read_logfmt - pytables>=3.4.3 # pandas.read_hdf, DataFrame.to_hdf - s3fs # pandas.read_csv... when using 's3://...' path - sqlalchemy # pandas.read_sql, DataFrame.to_sql diff --git a/pandas/__init__.py b/pandas/__init__.py index d6584bf4f1c4f..32d40f44643a5 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -166,6 +166,7 @@ read_gbq, read_html, read_json, + read_logfmt, read_stata, read_sas, read_spss, diff --git a/pandas/io/api.py b/pandas/io/api.py index 2d25ffe5f8a6b..2af83ab0158a9 100644 --- a/pandas/io/api.py +++ b/pandas/io/api.py @@ -10,6 +10,7 @@ from pandas.io.gbq import read_gbq from pandas.io.html import read_html from pandas.io.json import read_json +from pandas.io.logfmt_format import read_logfmt from pandas.io.orc import read_orc from pandas.io.parquet import read_parquet from pandas.io.parsers import read_csv, read_fwf, read_table diff --git a/pandas/io/logfmt_format.py b/pandas/io/logfmt_format.py new file mode 100644 index 0000000000000..9e5fae42f58e4 --- /dev/null +++ b/pandas/io/logfmt_format.py @@ -0,0 +1,166 @@ +"""logfmt format support""" + +from collections import abc +from io import StringIO 
+from itertools import islice +from typing import Optional, Dict, Iterable, Generator, Union + +from pandas._typing import FilePathOrBuffer, Dtype +from pandas.compat._optional import import_optional_dependency + +from pandas import DataFrame +from pandas.core.indexes.api import RangeIndex +from pandas.core.reshape.concat import concat + +from pandas.io.common import get_filepath_or_buffer, get_handle, infer_compression +from pandas.io.parsers import _validate_integer + + +def read_logfmt( + filepath_or_buffer: FilePathOrBuffer, + dtype: Optional[Dtype] = None, + chunksize: Optional[int] = None, + encoding: Optional[str] = None, + compression: Optional[str] = "infer", +): + """ + Load a logfmt_ from the file path. + + .. _logfmt: https://www.brandur.org/logfmt + + Parameters + ---------- + filepath_or_buffer : path object or file-like object + Any valid string path is acceptable. The string could be a URL. + + If you want to pass in a path object, pandas accepts any + ``os.PathLike``. + + By file-like object, we refer to objects with a ``read()`` method, + such as a file handler (e.g. via builtin ``open`` function) + or ``StringIO``. + + dtype : bool or dict, default None + If True, infer dtypes; if a dict of column to dtype, then use those; + if False, then don't infer dtypes at all, applies only to the data. + + chunksize : int, optional + Number of lines to be read per iteration. + If None, read the whole file. + + encoding : str, optional + Encoding to be used by the parser + + compression : str, optional + Compression to be used by the parser + + Returns + ------- + Data frame or `LogfmtReader`, if `chunksize` is specified. 
+ """ + + compression = infer_compression(filepath_or_buffer, compression) + filepath_or_buffer, _, _, should_close = get_filepath_or_buffer( + filepath_or_buffer, encoding=encoding, compression=compression + ) + + logfmt_reader = LogfmtReader( + filepath_or_buffer, + dtype=dtype, + chunksize=chunksize, + encoding=encoding, + compression=compression, + ) + + if chunksize: + return logfmt_reader + + result = logfmt_reader.read() + if should_close: + result.close() + + return result + + +class LogfmtReader(abc.Iterator): + """ + LogfmtReader provides an interface for reading in a logfmt file. + """ + + def __init__( + self, + filepath_or_buffer: FilePathOrBuffer, + dtype: Optional[Dtype], + chunksize: Optional[int], + encoding: Optional[str], + compression: Optional[str], + ) -> None: + self.chunksize = chunksize + self.dtype = dtype + self.encoding = encoding + self.compression = compression + self.nrows_seen = 0 + self.should_close = False + + if self.chunksize is not None: + self.chunksize = _validate_integer("chunksize", self.chunksize, 1) + + if isinstance(filepath_or_buffer, str) or self.compression is not None: + self.data, _ = get_handle( + filepath_or_buffer, + "r", + encoding=self.encoding, + compression=self.compression, + ) + self.should_close = True + else: + self.data = filepath_or_buffer + + def _get_data_from_filepath(self, path: FilePathOrBuffer): + return path + + def read(self) -> DataFrame: + return concat(self) + + def close(self) -> None: + """ + If we opened a stream earlier, we should close it. + + If an open stream or file was passed, we leave it open. 
+ """ + if self.should_close: + try: + self.data.close() + except (IOError, AttributeError): + pass + + @staticmethod + def infer_types(lines: Iterable[Dict]) -> Generator[Dict, None, None]: + """Infer types for parsed logfmt lines""" + for line in lines: + for key in line: + try: + line[key] = int(line[key]) + except ValueError: + try: + line[key] = float(line[key]) + except ValueError: + pass + yield line + + def __next__(self): + logfmt = import_optional_dependency("logfmt") + + lines = list(islice(self.data, self.chunksize)) + if lines: + logfmt_lines = self.infer_types(logfmt.parse(StringIO("\n".join(lines)))) + obj = DataFrame(logfmt_lines, dtype=self.dtype) + + # Make sure that the returned objects have the right index. + obj.index = RangeIndex(self.nrows_seen, self.nrows_seen + len(obj)) + self.nrows_seen += len(obj) + + return obj + + self.close() + raise StopIteration diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index f1de15dd34464..bfcdf012b0dce 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -19,6 +19,12 @@ def jsonl_file(datapath): return datapath("io", "parser", "data", "items.jsonl") +@pytest.fixture +def logfmt_file(datapath): + """Path to logfmt file""" + return datapath("io", "parser", "data", "logfmt.log") + + @pytest.fixture def salaries_table(datapath): """DataFrame with the salaries dataset""" diff --git a/pandas/tests/io/parser/data/logfmt.log b/pandas/tests/io/parser/data/logfmt.log new file mode 100644 index 0000000000000..4c5e6ad64e480 --- /dev/null +++ b/pandas/tests/io/parser/data/logfmt.log @@ -0,0 +1,3 @@ +tag=first foo=1 bar=1.0 +tag="second line" bar=2.0 foo=2 +tag="\"third line\"" foo=3 bar=3.0 diff --git a/pandas/tests/io/parser/data/logfmt.log.gz b/pandas/tests/io/parser/data/logfmt.log.gz new file mode 100644 index 0000000000000..9a7626f93ed11 Binary files /dev/null and b/pandas/tests/io/parser/data/logfmt.log.gz differ diff --git a/pandas/tests/io/test_logfmt.py 
b/pandas/tests/io/test_logfmt.py new file mode 100644 index 0000000000000..1bb2808501743 --- /dev/null +++ b/pandas/tests/io/test_logfmt.py @@ -0,0 +1,36 @@ +"""Test logfmt format support""" + +import pytest + +import pandas as pd +from pandas import DataFrame, read_logfmt +import pandas._testing as tm + + +@pytest.fixture +def logfmt_file(datapath): + """Path to logfmt file""" + return datapath("io", "parser", "data", "logfmt.log") + + +@pytest.fixture +def logfmt_gzip_file(datapath): + """Path to gzipped logfmt file""" + return datapath("io", "parser", "data", "logfmt.log.gz") + + +class TestLogfmt: + def test_read_logfmt(self, logfmt_file, logfmt_gzip_file): + expected_df = DataFrame( + [["first", 1, 1.0], ["second line", 2, 2.0], ['"third line"', 3, 3.0]], + columns=["tag", "foo", "bar"], + ) + + parsed_df = read_logfmt(logfmt_file) + tm.assert_frame_equal(parsed_df, expected_df) + + parsed_gzip_df = read_logfmt(logfmt_gzip_file) + tm.assert_frame_equal(parsed_gzip_df, expected_df) + + logfmt_reader = read_logfmt(logfmt_file, chunksize=2) + tm.assert_frame_equal(logfmt_reader.read(), expected_df)
- [x] closes #34501 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34494
2020-05-31T06:31:31Z
2020-05-31T21:10:17Z
null
2020-05-31T21:10:22Z
Test for pd.to_sql column error if data contains -np.inf
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 0ca19ffd1f496..3767c8f23f62d 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1044,6 +1044,7 @@ I/O - Bug in :meth:`HDFStore.append_to_multiple` was raising a ``ValueError`` when the min_itemsize parameter is set (:issue:`11238`) - Bug in :meth:`~HDFStore.create_table` now raises an error when `column` argument was not specified in `data_columns` on input (:issue:`28156`) - :meth:`read_json` now could read line-delimited json file from a file url while `lines` and `chunksize` are set. +- Bug in :meth:`DataFrame.to_sql` when reading DataFrames with ``-np.inf`` entries with MySQL now has a more explicit ``ValueError`` (:issue:`34431`) Plotting ^^^^^^^^ diff --git a/pandas/io/sql.py b/pandas/io/sql.py index b137608475b3d..9177696ca13d6 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -1391,7 +1391,20 @@ def to_sql( dtype=dtype, ) table.create() - table.insert(chunksize, method=method) + + from sqlalchemy import exc + + try: + table.insert(chunksize, method=method) + except exc.SQLAlchemyError as err: + # GH34431 + msg = "(1054, \"Unknown column 'inf' in 'field list'\")" + err_text = str(err.orig) + if re.search(msg, err_text): + raise ValueError("inf cannot be used with MySQL") from err + else: + raise err + if not name.isdigit() and not name.islower(): # check for potentially case sensitivity issues (GH7815) # Only check when name is not a number and name is not lower case diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index a07e7a74b7573..0991fae39138e 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -1813,6 +1813,24 @@ def main(connectable): DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn) main(self.conn) + @pytest.mark.parametrize( + "input", + [{"foo": [np.inf]}, {"foo": [-np.inf]}, {"foo": [-np.inf], "infe0": ["bar"]}], + ) + def 
test_to_sql_with_negative_npinf(self, input): + # GH 34431 + + df = pd.DataFrame(input) + + if self.flavor == "mysql": + msg = "inf cannot be used with MySQL" + with pytest.raises(ValueError, match=msg): + df.to_sql("foobar", self.conn, index=False) + else: + df.to_sql("foobar", self.conn, index=False) + res = sql.read_sql_table("foobar", self.conn) + tm.assert_equal(df, res) + def test_temporary_table(self): test_data = "Hello, World!" expected = DataFrame({"spam": [test_data]})
- [x] closes #34431 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34493
2020-05-31T03:31:31Z
2020-07-01T18:28:31Z
2020-07-01T18:28:31Z
2020-07-01T18:28:50Z
CLN: remove unused JsonReader.path_or_buf
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index ac6f9ff372601..72aa8fdd16e6d 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -639,7 +639,6 @@ def __init__( compression, ): - self.path_or_buf = filepath_or_buffer self.orient = orient self.typ = typ self.dtype = dtype
- [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/34492
2020-05-31T01:53:57Z
2020-05-31T21:30:38Z
2020-05-31T21:30:38Z
2020-05-31T22:04:02Z
CLN: de-duplicate bits lib timestamps
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 48c4afe7d4c1b..4e377656f213f 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -25,7 +25,7 @@ from pandas._libs.tslibs.util cimport ( is_timedelta64_object, is_array, ) -from pandas._libs.tslibs.base cimport ABCTimedelta, ABCTimestamp +from pandas._libs.tslibs.base cimport ABCTimestamp from pandas._libs.tslibs cimport ccalendar @@ -41,6 +41,7 @@ from pandas._libs.tslibs.np_datetime cimport ( ) from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime from pandas._libs.tslibs.offsets cimport to_offset, is_tick_object, is_offset_object +from pandas._libs.tslibs.timedeltas cimport is_any_td_scalar, delta_to_nanoseconds from pandas._libs.tslibs.timedeltas import Timedelta from pandas._libs.tslibs.timezones cimport ( is_utc, maybe_get_tz, treat_tz_as_pytz, utc_pytz as UTC, @@ -344,37 +345,15 @@ cdef class _Timestamp(ABCTimestamp): def __add__(self, other): cdef: - int64_t other_int, nanos = 0 - - if is_timedelta64_object(other): - other_int = other.astype('timedelta64[ns]').view('i8') - return type(self)(self.value + other_int, tz=self.tzinfo, freq=self.freq) - - elif is_integer_object(other): - raise integer_op_not_supported(self) - - elif PyDelta_Check(other): - # logic copied from delta_to_nanoseconds to prevent circular import - if isinstance(other, ABCTimedelta): - # pd.Timedelta - nanos = other.value - else: - nanos = (other.days * 24 * 60 * 60 * 1000000 + - other.seconds * 1000000 + - other.microseconds) * 1000 + int64_t nanos = 0 + if is_any_td_scalar(other): + nanos = delta_to_nanoseconds(other) result = type(self)(self.value + nanos, tz=self.tzinfo, freq=self.freq) return result - elif is_tick_object(other): - try: - nanos = other.nanos - except OverflowError as err: - raise OverflowError( - f"the add operation between {other} and {self} will overflow" - ) from err - result = type(self)(self.value + nanos, tz=self.tzinfo, 
freq=self.freq) - return result + elif is_integer_object(other): + raise integer_op_not_supported(self) elif is_array(other): if other.dtype.kind in ['i', 'u']: @@ -395,8 +374,7 @@ cdef class _Timestamp(ABCTimestamp): def __sub__(self, other): - if (is_timedelta64_object(other) or is_integer_object(other) or - PyDelta_Check(other) or is_tick_object(other)): + if is_any_td_scalar(other) or is_integer_object(other): neg_other = -other return self + neg_other @@ -434,7 +412,6 @@ cdef class _Timestamp(ABCTimestamp): # scalar Timestamp/datetime - Timestamp/datetime -> yields a # Timedelta - from pandas._libs.tslibs.timedeltas import Timedelta try: return Timedelta(self.value - other.value) except (OverflowError, OutOfBoundsDatetime) as err: diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py index ed0045bcab989..eb9932f9a3a97 100644 --- a/pandas/tests/scalar/timestamp/test_arithmetic.py +++ b/pandas/tests/scalar/timestamp/test_arithmetic.py @@ -38,14 +38,17 @@ def test_overflow_offset_raises(self): r"\<-?\d+ \* Days\> and \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} " "will overflow" ) + lmsg = "|".join( + ["Python int too large to convert to C long", "int too big to convert"] + ) - with pytest.raises(OverflowError, match=msg): + with pytest.raises(OverflowError, match=lmsg): stamp + offset_overflow with pytest.raises(OverflowError, match=msg): offset_overflow + stamp - with pytest.raises(OverflowError, match=msg): + with pytest.raises(OverflowError, match=lmsg): stamp - offset_overflow # xref https://github.com/pandas-dev/pandas/issues/14080 @@ -54,13 +57,13 @@ def test_overflow_offset_raises(self): stamp = Timestamp("2000/1/1") offset_overflow = to_offset("D") * 100 ** 5 - with pytest.raises(OverflowError, match=msg): + with pytest.raises(OverflowError, match=lmsg): stamp + offset_overflow with pytest.raises(OverflowError, match=msg): offset_overflow + stamp - with pytest.raises(OverflowError, match=msg): + with 
pytest.raises(OverflowError, match=lmsg): stamp - offset_overflow def test_overflow_timestamp_raises(self):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34491
2020-05-31T00:50:29Z
2020-05-31T22:20:10Z
2020-05-31T22:20:10Z
2020-05-31T22:32:59Z
DOC: Added documentation for building using pyenv
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index d02896f777348..3022609f976c6 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -270,7 +270,7 @@ Creating a Python environment (pip) If you aren't using conda for your development environment, follow these instructions. You'll need to have at least Python 3.6.1 installed on your system. -**Unix**/**Mac OS** +**Unix**/**Mac OS with virtualenv** .. code-block:: bash @@ -286,7 +286,31 @@ You'll need to have at least Python 3.6.1 installed on your system. python -m pip install -r requirements-dev.txt # Build and install pandas - python setup.py build_ext --inplace -j 0 + python setup.py build_ext --inplace -j 4 + python -m pip install -e . --no-build-isolation --no-use-pep517 + +**Unix**/**Mac OS with pyenv** + +Consult the docs for setting up pyenv `here <https://github.com/pyenv/pyenv>`__. + +.. code-block:: bash + + # Create a virtual environment + # Use an ENV_DIR of your choice. We'll use ~/Users/<yourname>/.pyenv/versions/pandas-dev + + pyenv virtualenv <version> <name-to-give-it> + + # For instance: + pyenv virtualenv 3.7.6 pandas-dev + + # Activate the virtualenv + pyenv activate pandas-dev + + # Now install the build dependencies in the cloned pandas repo + python -m pip install -r requirements-dev.txt + + # Build and install pandas + python setup.py build_ext --inplace -j 4 python -m pip install -e . --no-build-isolation --no-use-pep517 **Windows** @@ -312,7 +336,7 @@ should already exist. python -m pip install -r requirements-dev.txt # Build and install pandas - python setup.py build_ext --inplace -j 0 + python setup.py build_ext --inplace -j 4 python -m pip install -e . --no-build-isolation --no-use-pep517 Creating a branch
Added documentation for building using pyenv. Discussed this PR with @WillAyd on gitter on May 20. Also fixed up another line he mentioned (python setup.py build_ext --inplace -j 4 instead of: python setup.py build_ext --inplace -j 0) for conda/venv/pyenv. Have rebuilt the docs and ensured they build properly.
https://api.github.com/repos/pandas-dev/pandas/pulls/34490
2020-05-30T20:52:29Z
2020-06-07T20:09:52Z
2020-06-07T20:09:52Z
2020-06-07T20:10:05Z
Added documentation for building using pyenv.
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index d02896f777348..53662de7664b8 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -270,7 +270,7 @@ Creating a Python environment (pip) If you aren't using conda for your development environment, follow these instructions. You'll need to have at least Python 3.6.1 installed on your system. -**Unix**/**Mac OS** +**Unix**/**Mac OS with virtualenv** .. code-block:: bash @@ -286,7 +286,31 @@ You'll need to have at least Python 3.6.1 installed on your system. python -m pip install -r requirements-dev.txt # Build and install pandas - python setup.py build_ext --inplace -j 0 + python setup.py build_ext --inplace -j 4 + python -m pip install -e . --no-build-isolation --no-use-pep517 + +**Unix**/**Mac OS with pyenv** + +Consult the docs for setting up pyenv `here <https://github.com/pyenv/pyenv>`__. + +.. code-block:: bash + + # Create a virtual environment + # Use an ENV_DIR of your choice. We'll use ~/Users/<yourname>/.pyenv/versions/pandas-dev + + pyenv virtualenv <version> <name-to-give-it> + + #For instance: + pyenv virtualenv 3.7.6 pandas-dev + + # Activate the virtualenv + pyenv global pandas-dev + + # Now install the build dependencies in the cloned pandas repo + python -m pip install -r requirements-dev.txt + + # Build and install pandas + python setup.py build_ext --inplace -j 4 python -m pip install -e . --no-build-isolation --no-use-pep517 **Windows** @@ -312,7 +336,7 @@ should already exist. python -m pip install -r requirements-dev.txt # Build and install pandas - python setup.py build_ext --inplace -j 0 + python setup.py build_ext --inplace -j 4 python -m pip install -e . --no-build-isolation --no-use-pep517 Creating a branch
Added documentation for building using pyenv. Discussed this PR with @WillAyd on gitter on May 20. Also fixed up another line he mentioned (python setup.py build_ext --inplace -j 4 instead of: python setup.py build_ext --inplace -j 0) for conda/venv/pyenv. Have rebuilt the docs and ensured they build properly.
https://api.github.com/repos/pandas-dev/pandas/pulls/34489
2020-05-30T20:24:03Z
2020-05-30T20:50:34Z
null
2020-05-30T20:50:34Z
BUG: Raise ValueError for non numerical join columns in merge_asof
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index b4b98ec0403a8..84f1fa6ad8086 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -302,8 +302,7 @@ Reshaping - Bug in :meth:`DataFrame.unstack` with missing levels led to incorrect index names (:issue:`37510`) - Bug in :func:`join` over :class:`MultiIndex` returned wrong result, when one of both indexes had only one level (:issue:`36909`) - Bug in :func:`concat` incorrectly casting to ``object`` dtype in some cases when one or more of the operands is empty (:issue:`38843`, :issue:`38907`) -- - +- :meth:`merge_asof` raises ``ValueError`` instead of cryptic ``TypeError`` in case of non-numerical merge columns (:issue:`29130`) Sparse ^^^^^^ diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index ac5fc7cddf82a..1caf1a2a023da 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1708,6 +1708,23 @@ def _validate_specification(self): if self.left_by is not None and self.right_by is None: raise MergeError("missing right_by") + # GH#29130 Check that merge keys do not have dtype object + lo_dtype = ( + self.left[self.left_on[0]].dtype + if not self.left_index + else self.left.index.dtype + ) + ro_dtype = ( + self.right[self.right_on[0]].dtype + if not self.right_index + else self.right.index.dtype + ) + if is_object_dtype(lo_dtype) or is_object_dtype(ro_dtype): + raise MergeError( + f"Incompatible merge dtype, {repr(ro_dtype)} and " + f"{repr(lo_dtype)}, both sides must have numeric dtype" + ) + # add 'by' to our key-list so we can have it in the # output as a key if self.left_by is not None: diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 5cb7bdd603517..ecff63b495fbb 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -1168,7 +1168,7 @@ def test_on_float_by_int(self): 
tm.assert_frame_equal(result, expected) def test_merge_datatype_error_raises(self): - msg = r"incompatible merge keys \[0\] .*, must be the same type" + msg = r"Incompatible merge dtype, .*, both sides must have numeric dtype" left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]}) right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]}) @@ -1373,3 +1373,39 @@ def test_left_index_right_index_tolerance(self): tolerance=Timedelta(seconds=0.5), ) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "kwargs", [{"on": "x"}, {"left_index": True, "right_index": True}] +) +@pytest.mark.parametrize( + "data", + [["2019-06-01 00:09:12", "2019-06-01 00:10:29"], [1.0, "2019-06-01 00:10:29"]], +) +def test_merge_asof_non_numerical_dtype(kwargs, data): + # GH#29130 + left = pd.DataFrame({"x": data}, index=data) + right = pd.DataFrame({"x": data}, index=data) + with pytest.raises( + MergeError, + match=r"Incompatible merge dtype, .*, both sides must have numeric dtype", + ): + pd.merge_asof(left, right, **kwargs) + + +def test_merge_asof_non_numerical_dtype_object(): + # GH#29130 + left = pd.DataFrame({"a": ["12", "13", "15"], "left_val1": ["a", "b", "c"]}) + right = pd.DataFrame({"a": ["a", "b", "c"], "left_val": ["d", "e", "f"]}) + with pytest.raises( + MergeError, + match=r"Incompatible merge dtype, .*, both sides must have numeric dtype", + ): + pd.merge_asof( + left, + right, + left_on="left_val1", + right_on="a", + left_by="a", + right_by="left_val", + )
- [x] closes #29130 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I checked for dtype object. Normally, I would try to check for allowed dtypes, but that would require a lot of dtype checks instead of only one. I hope object dtype is sufficient to exclude
https://api.github.com/repos/pandas-dev/pandas/pulls/34488
2020-05-30T19:57:50Z
2021-01-06T00:16:29Z
2021-01-06T00:16:28Z
2021-07-25T21:58:03Z
TST, TYP: _use_dynamic_x
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index 9d8c26093296e..475452c71db58 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -7,6 +7,7 @@ from pandas._libs.tslibs import Period, to_offset from pandas._libs.tslibs.frequencies import FreqGroup, base_and_stride, get_freq_code +from pandas._typing import FrameOrSeriesUnion from pandas.core.dtypes.generic import ( ABCDatetimeIndex, @@ -192,7 +193,7 @@ def _get_freq(ax, series: "Series"): return freq, ax_freq -def _use_dynamic_x(ax, data): +def _use_dynamic_x(ax, data: "FrameOrSeriesUnion") -> bool: freq = _get_index_freq(data.index) ax_freq = _get_ax_freq(ax) diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 7dcb692e29337..738df5244955a 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -16,7 +16,7 @@ from pandas.core.resample import DatetimeIndex from pandas.tests.plotting.common import TestPlotBase -from pandas.tseries.offsets import DateOffset +from pandas.tseries.offsets import DateOffset, WeekOfMonth @td.skip_if_no_mpl @@ -325,6 +325,18 @@ def test_business_freq_convert(self): idx = ax.get_lines()[0].get_xdata() assert PeriodIndex(data=idx).freqstr == "M" + def test_freq_with_no_period_alias(self): + # GH34487 + freq = WeekOfMonth() + bts = tm.makeTimeSeries(5).asfreq(freq) + _, ax = self.plt.subplots() + bts.plot(ax=ax) + assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].toordinal() + idx = ax.get_lines()[0].get_xdata() + msg = "freq not specified and cannot be inferred" + with pytest.raises(ValueError, match=msg): + PeriodIndex(data=idx) + def test_nonzero_base(self): # GH2571 idx = date_range("2012-12-20", periods=24, freq="H") + timedelta(minutes=30)
This condition (which, from the current plotting test suite, is unreachable) was introduced in #9814, when `_use_dynamic_x` was written. Is there any reason why `get_period_alias(freq)` would return `None` if `freq` is a valid time frequency? If so, I'll add a test - else, this PR removes unreachable code. Added an annotation for `data` and the return type while I was here - is there a way to annotate `ax`? UPDATE ------ As it turns out (thanks jbrockmendel and jorisvandenbossche), this line can be hit, so I've included a test which does
https://api.github.com/repos/pandas-dev/pandas/pulls/34487
2020-05-30T18:54:09Z
2020-06-04T19:37:09Z
2020-06-04T19:37:09Z
2020-06-04T19:52:48Z
ENH: mul(Tick, float); simplify to_offset
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 88bf0e005a221..77adf6dfe53a9 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -994,6 +994,7 @@ Other - Bug in :meth:`DataFrame.plot.scatter` caused an error when plotting variable marker sizes (:issue:`32904`) - :class:`IntegerArray` now implements the ``sum`` operation (:issue:`33172`) - Bug in :class:`Tick` comparisons raising ``TypeError`` when comparing against timedelta-like objects (:issue:`34088`) +- Bug in :class:`Tick` multiplication raising ``TypeError`` when multiplying by a float (:issue:`34486`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 7f7dd62540387..0caacd81c53f5 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -25,7 +25,11 @@ cnp.import_array() from pandas._libs.properties import cache_readonly from pandas._libs.tslibs cimport util -from pandas._libs.tslibs.util cimport is_integer_object, is_datetime64_object +from pandas._libs.tslibs.util cimport ( + is_integer_object, + is_datetime64_object, + is_float_object, +) from pandas._libs.tslibs.base cimport ABCTimestamp @@ -743,6 +747,25 @@ cdef class Tick(SingleConstructorOffset): "Tick offset with `normalize=True` are not allowed." 
) + # FIXME: Without making this cpdef, we get AttributeError when calling + # from __mul__ + cpdef Tick _next_higher_resolution(Tick self): + if type(self) is Day: + return Hour(self.n * 24) + if type(self) is Hour: + return Minute(self.n * 60) + if type(self) is Minute: + return Second(self.n * 60) + if type(self) is Second: + return Milli(self.n * 1000) + if type(self) is Milli: + return Micro(self.n * 1000) + if type(self) is Micro: + return Nano(self.n * 1000) + raise NotImplementedError(type(self)) + + # -------------------------------------------------------------------- + def _repr_attrs(self) -> str: # Since cdef classes have no __dict__, we need to override return "" @@ -791,6 +814,21 @@ cdef class Tick(SingleConstructorOffset): def __gt__(self, other): return self.delta.__gt__(other) + def __mul__(self, other): + if not isinstance(self, Tick): + # cython semantics, this is __rmul__ + return other.__mul__(self) + if is_float_object(other): + n = other * self.n + # If the new `n` is an integer, we can represent it using the + # same Tick subclass as self, otherwise we need to move up + # to a higher-resolution subclass + if np.isclose(n % 1, 0): + return type(self)(int(n)) + new_self = self._next_higher_resolution() + return new_self * other + return BaseOffset.__mul__(self, other) + def __truediv__(self, other): if not isinstance(self, Tick): # cython semantics mean the args are sometimes swapped @@ -3563,6 +3601,9 @@ cpdef to_offset(freq): >>> to_offset(Hour()) <Hour> """ + # TODO: avoid runtime imports + from pandas._libs.tslibs.timedeltas import Timedelta + if freq is None: return None @@ -3589,7 +3630,9 @@ cpdef to_offset(freq): if split[-1] != "" and not split[-1].isspace(): # the last element must be blank raise ValueError("last element must be blank") - for sep, stride, name in zip(split[0::4], split[1::4], split[2::4]): + + tups = zip(split[0::4], split[1::4], split[2::4]) + for n, (sep, stride, name) in enumerate(tups): if sep != "" and not 
sep.isspace(): raise ValueError("separator must be spaces") prefix = _lite_rule_alias.get(name) or name @@ -3598,16 +3641,22 @@ cpdef to_offset(freq): if not stride: stride = 1 - # TODO: avoid runtime import - from .resolution import Resolution, reso_str_bump_map + if prefix in {"D", "H", "T", "S", "L", "U", "N"}: + # For these prefixes, we have something like "3H" or + # "2.5T", so we can construct a Timedelta with the + # matching unit and get our offset from delta_to_tick + td = Timedelta(1, unit=prefix) + off = delta_to_tick(td) + offset = off * float(stride) + if n != 0: + # If n==0, then stride_sign is already incorporated + # into the offset + offset *= stride_sign + else: + stride = int(stride) + offset = _get_offset(name) + offset = offset * int(np.fabs(stride) * stride_sign) - if prefix in reso_str_bump_map: - stride, name = Resolution.get_stride_from_decimal( - float(stride), prefix - ) - stride = int(stride) - offset = _get_offset(name) - offset = offset * int(np.fabs(stride) * stride_sign) if delta is None: delta = offset else: diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index e5b0142dae48b..10c239c683bc0 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -244,6 +244,22 @@ def test_tick_division(cls): assert result.delta == off.delta / 0.001 +def test_tick_mul_float(): + off = Micro(2) + + # Case where we retain type + result = off * 1.5 + expected = Micro(3) + assert result == expected + assert isinstance(result, Micro) + + # Case where we bump up to the next type + result = off * 1.25 + expected = Nano(2500) + assert result == expected + assert isinstance(result, Nano) + + @pytest.mark.parametrize("cls", tick_classes) def test_tick_rdiv(cls): off = cls(10)
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This will let us remove Resolution.get_stride_from_decimal
https://api.github.com/repos/pandas-dev/pandas/pulls/34486
2020-05-30T18:50:31Z
2020-06-01T02:40:02Z
2020-06-01T02:40:02Z
2020-06-01T03:22:12Z
DOC: updating the `indicator` wording in `merge` doc
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2666597cbf765..5c906bf4daa2e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -233,14 +233,13 @@ copy : bool, default True If False, avoid copy if possible. indicator : bool or str, default False - If True, adds a column to output DataFrame called "_merge" with - information on the source of each row. - If string, column with information on source of each row will be added to - output DataFrame, and column will be named value of string. - Information column is Categorical-type and takes on a value of "left_only" - for observations whose merge key only appears in 'left' DataFrame, - "right_only" for observations whose merge key only appears in 'right' - DataFrame, and "both" if the observation's merge key is found in both. + If True, adds a column to the output DataFrame called "_merge" with + information on the source of each row. The column can be given a different + name by providing a string argument. The column will have a Categorical + type with the value of "left_only" for observations whose merge key only + appears in the left DataFrame, "right_only" for observations + whose merge key only appears in the right DataFrame, and "both" + if the observation's merge key is found in both DataFrames. validate : str, optional If specified, checks if merge is of specified type.
- [ ] closes #34480 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34485
2020-05-30T18:00:46Z
2020-06-07T20:26:14Z
2020-06-07T20:26:14Z
2020-06-07T20:26:21Z
BUG: merge_asof should be treated as a left join
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index c9267a756bef3..f3d4d53b00aa2 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -786,6 +786,7 @@ Reshaping ^^^^^^^^^ - Bug in :func:`merge` raising error when performing an inner join with partial index and ``right_index`` when no overlap between indices (:issue:`33814`) - Bug in :meth:`DataFrame.unstack` with missing levels led to incorrect index names (:issue:`37510`) +- Bug in :func:`merge_asof` propagating the right Index with ``left_index=True`` and ``right_on`` specification instead of left Index (:issue:`33463`) - Bug in :func:`join` over :class:`MultiIndex` returned wrong result, when one of both indexes had only one level (:issue:`36909`) - :meth:`merge_asof` raises ``ValueError`` instead of cryptic ``TypeError`` in case of non-numerical merge columns (:issue:`29130`) - Bug in :meth:`DataFrame.join` not assigning values correctly when having :class:`MultiIndex` where at least one dimension is from dtype ``Categorical`` with non-alphabetically sorted categories (:issue:`38502`) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 94d78f6b54b91..8cee0dd2abb88 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -969,7 +969,16 @@ def _get_join_info( join_index = self.right.index.take(right_indexer) left_indexer = np.array([-1] * len(join_index), dtype=np.intp) elif self.left_index: - if len(self.right) > 0: + if self.how == "asof": + # GH#33463 asof should always behave like a left merge + join_index = self._create_join_index( + self.left.index, + self.right.index, + left_indexer, + how="left", + ) + + elif len(self.right) > 0: join_index = self._create_join_index( self.right.index, self.left.index, diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 3f5bb9b84372c..671f0ad2d26c7 100644 --- 
a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -6,6 +6,7 @@ import pandas as pd from pandas import ( + Index, Timedelta, merge_asof, read_csv, @@ -1338,7 +1339,9 @@ def test_merge_index_column_tz(self): "from_date": index[1:], "abc": [2.46] * 3 + [2.19], }, - index=pd.Index([1, 2, 3, 4]), + index=pd.date_range( + "2019-10-01 00:30:00", freq="30min", periods=4, tz="UTC" + ), ) tm.assert_frame_equal(result, expected) @@ -1351,7 +1354,7 @@ def test_merge_index_column_tz(self): "abc": [2.46] * 4 + [2.19], "xyz": [np.nan, 0.9, 0.8, 0.7, 0.6], }, - index=pd.Index([0, 1, 2, 3, 4]), + index=Index([0, 1, 2, 3, 4]), ) tm.assert_frame_equal(result, expected) @@ -1412,3 +1415,25 @@ def test_merge_asof_non_numerical_dtype_object(): left_by="a", right_by="left_val", ) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"right_index": True, "left_index": True}, + {"left_on": "left_time", "right_index": True}, + {"left_index": True, "right_on": "right"}, + ], +) +def test_merge_asof_index_behavior(kwargs): + # GH 33463 + index = Index([1, 5, 10], name="test") + left = pd.DataFrame({"left": ["a", "b", "c"], "left_time": [1, 4, 10]}, index=index) + right = pd.DataFrame({"right": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7]) + result = merge_asof(left, right, **kwargs) + + expected = pd.DataFrame( + {"left": ["a", "b", "c"], "left_time": [1, 4, 10], "right": [1, 3, 7]}, + index=index, + ) + tm.assert_frame_equal(result, expected)
- [x] closes #33463 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry The fix broke one test (`test_merge_index_column_tz` in `test_merge_asof.py`) which expected the right index instead of the left index. The default index selection does not work in case of asof and `left_index=True`. I had to catch this case here.
https://api.github.com/repos/pandas-dev/pandas/pulls/34484
2020-05-30T16:17:34Z
2021-04-12T14:41:14Z
2021-04-12T14:41:14Z
2021-04-12T17:33:51Z
DOC: start 1.0.5
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index b381dae3579c8..ad5bb5a5b2d72 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -24,6 +24,7 @@ Version 1.0 .. toctree:: :maxdepth: 2 + v1.0.5 v1.0.4 v1.0.3 v1.0.2 diff --git a/doc/source/whatsnew/v1.0.4.rst b/doc/source/whatsnew/v1.0.4.rst index 5cc1edc9ca9cd..84b7e7d45e8b7 100644 --- a/doc/source/whatsnew/v1.0.4.rst +++ b/doc/source/whatsnew/v1.0.4.rst @@ -45,4 +45,4 @@ Bug fixes Contributors ~~~~~~~~~~~~ -.. contributors:: v1.0.3..v1.0.4|HEAD +.. contributors:: v1.0.3..v1.0.4 diff --git a/doc/source/whatsnew/v1.0.5.rst b/doc/source/whatsnew/v1.0.5.rst new file mode 100644 index 0000000000000..1edc7e1cad72f --- /dev/null +++ b/doc/source/whatsnew/v1.0.5.rst @@ -0,0 +1,31 @@ + +.. _whatsnew_105: + +What's new in 1.0.5 (June XX, 2020) +----------------------------------- + +These are the changes in pandas 1.0.5. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_105.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ +- +- + +.. _whatsnew_105.bug_fixes: + +Bug fixes +~~~~~~~~~ +- +- + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.0.4..v1.0.5|HEAD
xref https://github.com/pandas-dev/pandas/pull/33970#issuecomment-624029963
https://api.github.com/repos/pandas-dev/pandas/pulls/34481
2020-05-30T14:13:52Z
2020-05-31T18:04:34Z
2020-05-31T18:04:34Z
2020-06-01T15:43:56Z
CLN: Removed duplicated test data
diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index f1de15dd34464..fcee25c258efa 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -10,7 +10,7 @@ @pytest.fixture def tips_file(datapath): """Path to the tips dataset""" - return datapath("io", "parser", "data", "tips.csv") + return datapath("io", "data", "csv", "tips.csv") @pytest.fixture diff --git a/pandas/tests/io/parser/data/test1.csv.bz2 b/pandas/tests/io/data/csv/test1.csv.bz2 similarity index 100% rename from pandas/tests/io/parser/data/test1.csv.bz2 rename to pandas/tests/io/data/csv/test1.csv.bz2 diff --git a/pandas/tests/io/parser/data/test1.csv.gz b/pandas/tests/io/data/csv/test1.csv.gz similarity index 100% rename from pandas/tests/io/parser/data/test1.csv.gz rename to pandas/tests/io/data/csv/test1.csv.gz diff --git a/pandas/tests/io/parser/data/tips.csv.bz2 b/pandas/tests/io/data/csv/tips.csv.bz2 similarity index 100% rename from pandas/tests/io/parser/data/tips.csv.bz2 rename to pandas/tests/io/data/csv/tips.csv.bz2 diff --git a/pandas/tests/io/parser/data/tips.csv.gz b/pandas/tests/io/data/csv/tips.csv.gz similarity index 100% rename from pandas/tests/io/parser/data/tips.csv.gz rename to pandas/tests/io/data/csv/tips.csv.gz diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py index 15967e3be176a..d03c85f65ea8d 100644 --- a/pandas/tests/io/parser/conftest.py +++ b/pandas/tests/io/parser/conftest.py @@ -53,11 +53,11 @@ def csv_dir_path(datapath): @pytest.fixture -def csv1(csv_dir_path): +def csv1(datapath): """ The path to the data file "test1.csv" needed for parser tests. 
""" - return os.path.join(csv_dir_path, "test1.csv") + return os.path.join(datapath("io", "data", "csv"), "test1.csv") _cParserHighMemory = CParserHighMemory() diff --git a/pandas/tests/io/parser/data/test1.csv b/pandas/tests/io/parser/data/test1.csv deleted file mode 100644 index 4bdb62943c4c8..0000000000000 --- a/pandas/tests/io/parser/data/test1.csv +++ /dev/null @@ -1,8 +0,0 @@ -index,A,B,C,D -2000-01-03 00:00:00,0.980268513777,3.68573087906,-0.364216805298,-1.15973806169 -2000-01-04 00:00:00,1.04791624281,-0.0412318367011,-0.16181208307,0.212549316967 -2000-01-05 00:00:00,0.498580885705,0.731167677815,-0.537677223318,1.34627041952 -2000-01-06 00:00:00,1.12020151869,1.56762092543,0.00364077397681,0.67525259227 -2000-01-07 00:00:00,-0.487094399463,0.571454623474,-1.6116394093,0.103468562917 -2000-01-10 00:00:00,0.836648671666,0.246461918642,0.588542635376,1.0627820613 -2000-01-11 00:00:00,-0.157160753327,1.34030689438,1.19577795622,-1.09700699751 \ No newline at end of file diff --git a/pandas/tests/io/parser/data/tips.csv b/pandas/tests/io/parser/data/tips.csv deleted file mode 100644 index 856a65a69e647..0000000000000 --- a/pandas/tests/io/parser/data/tips.csv +++ /dev/null @@ -1,245 +0,0 @@ -total_bill,tip,sex,smoker,day,time,size -16.99,1.01,Female,No,Sun,Dinner,2 -10.34,1.66,Male,No,Sun,Dinner,3 -21.01,3.5,Male,No,Sun,Dinner,3 -23.68,3.31,Male,No,Sun,Dinner,2 -24.59,3.61,Female,No,Sun,Dinner,4 -25.29,4.71,Male,No,Sun,Dinner,4 -8.77,2.0,Male,No,Sun,Dinner,2 -26.88,3.12,Male,No,Sun,Dinner,4 -15.04,1.96,Male,No,Sun,Dinner,2 -14.78,3.23,Male,No,Sun,Dinner,2 -10.27,1.71,Male,No,Sun,Dinner,2 -35.26,5.0,Female,No,Sun,Dinner,4 -15.42,1.57,Male,No,Sun,Dinner,2 -18.43,3.0,Male,No,Sun,Dinner,4 -14.83,3.02,Female,No,Sun,Dinner,2 -21.58,3.92,Male,No,Sun,Dinner,2 -10.33,1.67,Female,No,Sun,Dinner,3 -16.29,3.71,Male,No,Sun,Dinner,3 -16.97,3.5,Female,No,Sun,Dinner,3 -20.65,3.35,Male,No,Sat,Dinner,3 -17.92,4.08,Male,No,Sat,Dinner,2 -20.29,2.75,Female,No,Sat,Dinner,2 
-15.77,2.23,Female,No,Sat,Dinner,2 -39.42,7.58,Male,No,Sat,Dinner,4 -19.82,3.18,Male,No,Sat,Dinner,2 -17.81,2.34,Male,No,Sat,Dinner,4 -13.37,2.0,Male,No,Sat,Dinner,2 -12.69,2.0,Male,No,Sat,Dinner,2 -21.7,4.3,Male,No,Sat,Dinner,2 -19.65,3.0,Female,No,Sat,Dinner,2 -9.55,1.45,Male,No,Sat,Dinner,2 -18.35,2.5,Male,No,Sat,Dinner,4 -15.06,3.0,Female,No,Sat,Dinner,2 -20.69,2.45,Female,No,Sat,Dinner,4 -17.78,3.27,Male,No,Sat,Dinner,2 -24.06,3.6,Male,No,Sat,Dinner,3 -16.31,2.0,Male,No,Sat,Dinner,3 -16.93,3.07,Female,No,Sat,Dinner,3 -18.69,2.31,Male,No,Sat,Dinner,3 -31.27,5.0,Male,No,Sat,Dinner,3 -16.04,2.24,Male,No,Sat,Dinner,3 -17.46,2.54,Male,No,Sun,Dinner,2 -13.94,3.06,Male,No,Sun,Dinner,2 -9.68,1.32,Male,No,Sun,Dinner,2 -30.4,5.6,Male,No,Sun,Dinner,4 -18.29,3.0,Male,No,Sun,Dinner,2 -22.23,5.0,Male,No,Sun,Dinner,2 -32.4,6.0,Male,No,Sun,Dinner,4 -28.55,2.05,Male,No,Sun,Dinner,3 -18.04,3.0,Male,No,Sun,Dinner,2 -12.54,2.5,Male,No,Sun,Dinner,2 -10.29,2.6,Female,No,Sun,Dinner,2 -34.81,5.2,Female,No,Sun,Dinner,4 -9.94,1.56,Male,No,Sun,Dinner,2 -25.56,4.34,Male,No,Sun,Dinner,4 -19.49,3.51,Male,No,Sun,Dinner,2 -38.01,3.0,Male,Yes,Sat,Dinner,4 -26.41,1.5,Female,No,Sat,Dinner,2 -11.24,1.76,Male,Yes,Sat,Dinner,2 -48.27,6.73,Male,No,Sat,Dinner,4 -20.29,3.21,Male,Yes,Sat,Dinner,2 -13.81,2.0,Male,Yes,Sat,Dinner,2 -11.02,1.98,Male,Yes,Sat,Dinner,2 -18.29,3.76,Male,Yes,Sat,Dinner,4 -17.59,2.64,Male,No,Sat,Dinner,3 -20.08,3.15,Male,No,Sat,Dinner,3 -16.45,2.47,Female,No,Sat,Dinner,2 -3.07,1.0,Female,Yes,Sat,Dinner,1 -20.23,2.01,Male,No,Sat,Dinner,2 -15.01,2.09,Male,Yes,Sat,Dinner,2 -12.02,1.97,Male,No,Sat,Dinner,2 -17.07,3.0,Female,No,Sat,Dinner,3 -26.86,3.14,Female,Yes,Sat,Dinner,2 -25.28,5.0,Female,Yes,Sat,Dinner,2 -14.73,2.2,Female,No,Sat,Dinner,2 -10.51,1.25,Male,No,Sat,Dinner,2 -17.92,3.08,Male,Yes,Sat,Dinner,2 -27.2,4.0,Male,No,Thur,Lunch,4 -22.76,3.0,Male,No,Thur,Lunch,2 -17.29,2.71,Male,No,Thur,Lunch,2 -19.44,3.0,Male,Yes,Thur,Lunch,2 -16.66,3.4,Male,No,Thur,Lunch,2 
-10.07,1.83,Female,No,Thur,Lunch,1 -32.68,5.0,Male,Yes,Thur,Lunch,2 -15.98,2.03,Male,No,Thur,Lunch,2 -34.83,5.17,Female,No,Thur,Lunch,4 -13.03,2.0,Male,No,Thur,Lunch,2 -18.28,4.0,Male,No,Thur,Lunch,2 -24.71,5.85,Male,No,Thur,Lunch,2 -21.16,3.0,Male,No,Thur,Lunch,2 -28.97,3.0,Male,Yes,Fri,Dinner,2 -22.49,3.5,Male,No,Fri,Dinner,2 -5.75,1.0,Female,Yes,Fri,Dinner,2 -16.32,4.3,Female,Yes,Fri,Dinner,2 -22.75,3.25,Female,No,Fri,Dinner,2 -40.17,4.73,Male,Yes,Fri,Dinner,4 -27.28,4.0,Male,Yes,Fri,Dinner,2 -12.03,1.5,Male,Yes,Fri,Dinner,2 -21.01,3.0,Male,Yes,Fri,Dinner,2 -12.46,1.5,Male,No,Fri,Dinner,2 -11.35,2.5,Female,Yes,Fri,Dinner,2 -15.38,3.0,Female,Yes,Fri,Dinner,2 -44.3,2.5,Female,Yes,Sat,Dinner,3 -22.42,3.48,Female,Yes,Sat,Dinner,2 -20.92,4.08,Female,No,Sat,Dinner,2 -15.36,1.64,Male,Yes,Sat,Dinner,2 -20.49,4.06,Male,Yes,Sat,Dinner,2 -25.21,4.29,Male,Yes,Sat,Dinner,2 -18.24,3.76,Male,No,Sat,Dinner,2 -14.31,4.0,Female,Yes,Sat,Dinner,2 -14.0,3.0,Male,No,Sat,Dinner,2 -7.25,1.0,Female,No,Sat,Dinner,1 -38.07,4.0,Male,No,Sun,Dinner,3 -23.95,2.55,Male,No,Sun,Dinner,2 -25.71,4.0,Female,No,Sun,Dinner,3 -17.31,3.5,Female,No,Sun,Dinner,2 -29.93,5.07,Male,No,Sun,Dinner,4 -10.65,1.5,Female,No,Thur,Lunch,2 -12.43,1.8,Female,No,Thur,Lunch,2 -24.08,2.92,Female,No,Thur,Lunch,4 -11.69,2.31,Male,No,Thur,Lunch,2 -13.42,1.68,Female,No,Thur,Lunch,2 -14.26,2.5,Male,No,Thur,Lunch,2 -15.95,2.0,Male,No,Thur,Lunch,2 -12.48,2.52,Female,No,Thur,Lunch,2 -29.8,4.2,Female,No,Thur,Lunch,6 -8.52,1.48,Male,No,Thur,Lunch,2 -14.52,2.0,Female,No,Thur,Lunch,2 -11.38,2.0,Female,No,Thur,Lunch,2 -22.82,2.18,Male,No,Thur,Lunch,3 -19.08,1.5,Male,No,Thur,Lunch,2 -20.27,2.83,Female,No,Thur,Lunch,2 -11.17,1.5,Female,No,Thur,Lunch,2 -12.26,2.0,Female,No,Thur,Lunch,2 -18.26,3.25,Female,No,Thur,Lunch,2 -8.51,1.25,Female,No,Thur,Lunch,2 -10.33,2.0,Female,No,Thur,Lunch,2 -14.15,2.0,Female,No,Thur,Lunch,2 -16.0,2.0,Male,Yes,Thur,Lunch,2 -13.16,2.75,Female,No,Thur,Lunch,2 -17.47,3.5,Female,No,Thur,Lunch,2 
-34.3,6.7,Male,No,Thur,Lunch,6 -41.19,5.0,Male,No,Thur,Lunch,5 -27.05,5.0,Female,No,Thur,Lunch,6 -16.43,2.3,Female,No,Thur,Lunch,2 -8.35,1.5,Female,No,Thur,Lunch,2 -18.64,1.36,Female,No,Thur,Lunch,3 -11.87,1.63,Female,No,Thur,Lunch,2 -9.78,1.73,Male,No,Thur,Lunch,2 -7.51,2.0,Male,No,Thur,Lunch,2 -14.07,2.5,Male,No,Sun,Dinner,2 -13.13,2.0,Male,No,Sun,Dinner,2 -17.26,2.74,Male,No,Sun,Dinner,3 -24.55,2.0,Male,No,Sun,Dinner,4 -19.77,2.0,Male,No,Sun,Dinner,4 -29.85,5.14,Female,No,Sun,Dinner,5 -48.17,5.0,Male,No,Sun,Dinner,6 -25.0,3.75,Female,No,Sun,Dinner,4 -13.39,2.61,Female,No,Sun,Dinner,2 -16.49,2.0,Male,No,Sun,Dinner,4 -21.5,3.5,Male,No,Sun,Dinner,4 -12.66,2.5,Male,No,Sun,Dinner,2 -16.21,2.0,Female,No,Sun,Dinner,3 -13.81,2.0,Male,No,Sun,Dinner,2 -17.51,3.0,Female,Yes,Sun,Dinner,2 -24.52,3.48,Male,No,Sun,Dinner,3 -20.76,2.24,Male,No,Sun,Dinner,2 -31.71,4.5,Male,No,Sun,Dinner,4 -10.59,1.61,Female,Yes,Sat,Dinner,2 -10.63,2.0,Female,Yes,Sat,Dinner,2 -50.81,10.0,Male,Yes,Sat,Dinner,3 -15.81,3.16,Male,Yes,Sat,Dinner,2 -7.25,5.15,Male,Yes,Sun,Dinner,2 -31.85,3.18,Male,Yes,Sun,Dinner,2 -16.82,4.0,Male,Yes,Sun,Dinner,2 -32.9,3.11,Male,Yes,Sun,Dinner,2 -17.89,2.0,Male,Yes,Sun,Dinner,2 -14.48,2.0,Male,Yes,Sun,Dinner,2 -9.6,4.0,Female,Yes,Sun,Dinner,2 -34.63,3.55,Male,Yes,Sun,Dinner,2 -34.65,3.68,Male,Yes,Sun,Dinner,4 -23.33,5.65,Male,Yes,Sun,Dinner,2 -45.35,3.5,Male,Yes,Sun,Dinner,3 -23.17,6.5,Male,Yes,Sun,Dinner,4 -40.55,3.0,Male,Yes,Sun,Dinner,2 -20.69,5.0,Male,No,Sun,Dinner,5 -20.9,3.5,Female,Yes,Sun,Dinner,3 -30.46,2.0,Male,Yes,Sun,Dinner,5 -18.15,3.5,Female,Yes,Sun,Dinner,3 -23.1,4.0,Male,Yes,Sun,Dinner,3 -15.69,1.5,Male,Yes,Sun,Dinner,2 -19.81,4.19,Female,Yes,Thur,Lunch,2 -28.44,2.56,Male,Yes,Thur,Lunch,2 -15.48,2.02,Male,Yes,Thur,Lunch,2 -16.58,4.0,Male,Yes,Thur,Lunch,2 -7.56,1.44,Male,No,Thur,Lunch,2 -10.34,2.0,Male,Yes,Thur,Lunch,2 -43.11,5.0,Female,Yes,Thur,Lunch,4 -13.0,2.0,Female,Yes,Thur,Lunch,2 -13.51,2.0,Male,Yes,Thur,Lunch,2 -18.71,4.0,Male,Yes,Thur,Lunch,3 
-12.74,2.01,Female,Yes,Thur,Lunch,2 -13.0,2.0,Female,Yes,Thur,Lunch,2 -16.4,2.5,Female,Yes,Thur,Lunch,2 -20.53,4.0,Male,Yes,Thur,Lunch,4 -16.47,3.23,Female,Yes,Thur,Lunch,3 -26.59,3.41,Male,Yes,Sat,Dinner,3 -38.73,3.0,Male,Yes,Sat,Dinner,4 -24.27,2.03,Male,Yes,Sat,Dinner,2 -12.76,2.23,Female,Yes,Sat,Dinner,2 -30.06,2.0,Male,Yes,Sat,Dinner,3 -25.89,5.16,Male,Yes,Sat,Dinner,4 -48.33,9.0,Male,No,Sat,Dinner,4 -13.27,2.5,Female,Yes,Sat,Dinner,2 -28.17,6.5,Female,Yes,Sat,Dinner,3 -12.9,1.1,Female,Yes,Sat,Dinner,2 -28.15,3.0,Male,Yes,Sat,Dinner,5 -11.59,1.5,Male,Yes,Sat,Dinner,2 -7.74,1.44,Male,Yes,Sat,Dinner,2 -30.14,3.09,Female,Yes,Sat,Dinner,4 -12.16,2.2,Male,Yes,Fri,Lunch,2 -13.42,3.48,Female,Yes,Fri,Lunch,2 -8.58,1.92,Male,Yes,Fri,Lunch,1 -15.98,3.0,Female,No,Fri,Lunch,3 -13.42,1.58,Male,Yes,Fri,Lunch,2 -16.27,2.5,Female,Yes,Fri,Lunch,2 -10.09,2.0,Female,Yes,Fri,Lunch,2 -20.45,3.0,Male,No,Sat,Dinner,4 -13.28,2.72,Male,No,Sat,Dinner,2 -22.12,2.88,Female,Yes,Sat,Dinner,2 -24.01,2.0,Male,Yes,Sat,Dinner,4 -15.69,3.0,Male,Yes,Sat,Dinner,3 -11.61,3.39,Male,No,Sat,Dinner,2 -10.77,1.47,Male,No,Sat,Dinner,2 -15.53,3.0,Male,Yes,Sat,Dinner,2 -10.07,1.25,Male,No,Sat,Dinner,2 -12.6,1.0,Male,Yes,Sat,Dinner,2 -32.83,1.17,Male,Yes,Sat,Dinner,2 -35.83,4.67,Female,No,Sat,Dinner,3 -29.03,5.92,Male,No,Sat,Dinner,3 -27.18,2.0,Female,Yes,Sat,Dinner,2 -22.67,2.0,Male,Yes,Sat,Dinner,2 -17.82,1.75,Male,No,Sat,Dinner,2 -18.78,3.0,Female,No,Thur,Dinner,2 diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py index 13b74cf29f857..de7b3bed034c7 100644 --- a/pandas/tests/io/parser/test_encoding.py +++ b/pandas/tests/io/parser/test_encoding.py @@ -133,19 +133,21 @@ def test_read_csv_utf_aliases(all_parsers, utf_value, encoding_fmt): @pytest.mark.parametrize( - "fname,encoding", + "file_path,encoding", [ - ("test1.csv", "utf-8"), - ("unicode_series.csv", "latin-1"), - ("sauron.SHIFT_JIS.csv", "shiftjis"), + (("io", "data", "csv", "test1.csv"), "utf-8"), + 
(("io", "parser", "data", "unicode_series.csv"), "latin-1"), + (("io", "parser", "data", "sauron.SHIFT_JIS.csv"), "shiftjis"), ], ) -def test_binary_mode_file_buffers(all_parsers, csv_dir_path, fname, encoding): +def test_binary_mode_file_buffers( + all_parsers, csv_dir_path, file_path, encoding, datapath +): # gh-23779: Python csv engine shouldn't error on files opened in binary. # gh-31575: Python csv engine shouldn't error on files opened in raw binary. parser = all_parsers - fpath = os.path.join(csv_dir_path, fname) + fpath = datapath(*file_path) expected = parser.read_csv(fpath, encoding=encoding) with open(fpath, mode="r", encoding=encoding) as fa: diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index e0dee878006b8..509ae89909699 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -49,7 +49,7 @@ def check_compressed_urls(salaries_table, compression, extension, mode, engine): @pytest.fixture def tips_df(datapath): """DataFrame with the tips dataset.""" - return read_csv(datapath("io", "parser", "data", "tips.csv")) + return read_csv(datapath("io", "data", "csv", "tips.csv")) @pytest.mark.usefixtures("s3_resource") diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index 8d5af85c20d33..1c2518646bb29 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -21,7 +21,8 @@ class TestTextReader: @pytest.fixture(autouse=True) def setup_method(self, datapath): self.dirpath = datapath("io", "parser", "data") - self.csv1 = os.path.join(self.dirpath, "test1.csv") + csv1_dirpath = datapath("io", "data", "csv") + self.csv1 = os.path.join(csv1_dirpath, "test1.csv") self.csv2 = os.path.join(self.dirpath, "test2.csv") self.xls1 = os.path.join(self.dirpath, "test.xls")
Remove duplicated test data. "test1.csv" & "tips.csv" are present in both "tests/io/parser/data" & "tests/io/data/csv". Move the "*.csv.bz2" & "*.csv.gz" files since the s3_resource fixture in tests/io/conftest requires these. Follow up from: https://github.com/pandas-dev/pandas/issues/29439 Think we should move all the data in io/parser/data/* into io/data/ so it can be re-used. Will save for follow up.
https://api.github.com/repos/pandas-dev/pandas/pulls/34477
2020-05-30T01:17:43Z
2020-06-08T01:45:23Z
2020-06-08T01:45:23Z
2020-06-08T01:45:31Z
REF: make normalize_i8_timestamps cpdef
diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 15313c0c2c3dd..94f6d1d9020d2 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -1,6 +1,6 @@ -from cpython.datetime cimport datetime +from cpython.datetime cimport datetime, tzinfo -from numpy cimport int64_t, int32_t +from numpy cimport int64_t, int32_t, ndarray from pandas._libs.tslibs.np_datetime cimport npy_datetimestruct @@ -24,3 +24,5 @@ cdef int64_t get_datetime64_nanos(object val) except? -1 cpdef datetime localize_pydatetime(datetime dt, object tz) cdef int64_t cast_from_unit(object ts, str unit) except? -1 + +cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo tz) diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 8fd2f6b476e1c..cbb27bf8e9917 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -763,7 +763,7 @@ cpdef inline datetime localize_pydatetime(datetime dt, object tz): @cython.wraparound(False) @cython.boundscheck(False) -def normalize_i8_timestamps(int64_t[:] stamps, object tz): +cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo tz): """ Normalize each of the (nanosecond) timezone aware timestamps in the given array by rounding down to the beginning of the day (i.e. midnight). @@ -774,31 +774,6 @@ def normalize_i8_timestamps(int64_t[:] stamps, object tz): stamps : int64 ndarray tz : tzinfo or None - Returns - ------- - result : int64 ndarray of converted of normalized nanosecond timestamps - """ - cdef: - int64_t[:] result - - result = _normalize_local(stamps, tz) - - return result.base # .base to access underlying np.ndarray - - -@cython.wraparound(False) -@cython.boundscheck(False) -cdef int64_t[:] _normalize_local(const int64_t[:] stamps, tzinfo tz): - """ - Normalize each of the (nanosecond) timestamps in the given array by - rounding down to the beginning of the day (i.e. 
midnight) for the - given timezone `tz`. - - Parameters - ---------- - stamps : int64 ndarray - tz : tzinfo - Returns ------- result : int64 ndarray of converted of normalized nanosecond timestamps @@ -843,7 +818,7 @@ cdef int64_t[:] _normalize_local(const int64_t[:] stamps, tzinfo tz): dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) result[i] = _normalized_stamp(&dts) - return result + return result.base # `.base` to access underlying ndarray cdef inline int64_t _normalized_stamp(npy_datetimestruct *dts) nogil: diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 48c4afe7d4c1b..82068c2178704 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -16,8 +16,15 @@ cnp.import_array() from cpython.object cimport (PyObject_RichCompareBool, PyObject_RichCompare, Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE) -from cpython.datetime cimport (datetime, time, PyDateTime_Check, PyDelta_Check, - PyTZInfo_Check, PyDateTime_IMPORT) +from cpython.datetime cimport ( + datetime, + time, + tzinfo, + PyDateTime_Check, + PyDelta_Check, + PyTZInfo_Check, + PyDateTime_IMPORT, +) PyDateTime_IMPORT from pandas._libs.tslibs.util cimport ( @@ -29,10 +36,12 @@ from pandas._libs.tslibs.base cimport ABCTimedelta, ABCTimestamp from pandas._libs.tslibs cimport ccalendar -from pandas._libs.tslibs.conversion import normalize_i8_timestamps from pandas._libs.tslibs.conversion cimport ( - _TSObject, convert_to_tsobject, - convert_datetime_to_tsobject) + _TSObject, + convert_to_tsobject, + convert_datetime_to_tsobject, + normalize_i8_timestamps, +) from pandas._libs.tslibs.fields import get_start_end_field, get_date_name_field from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT from pandas._libs.tslibs.np_datetime cimport ( @@ -1461,13 +1470,18 @@ default 'raise' """ Normalize Timestamp to midnight, preserving tz information. 
""" - if self.tz is None or is_utc(self.tz): + cdef: + ndarray[int64_t] normalized + tzinfo own_tz = self.tzinfo # could be None + + if own_tz is None or is_utc(own_tz): DAY_NS = ccalendar.DAY_NANOS normalized_value = self.value - (self.value % DAY_NS) - return Timestamp(normalized_value).tz_localize(self.tz) - normalized_value = normalize_i8_timestamps( - np.array([self.value], dtype='i8'), tz=self.tz)[0] - return Timestamp(normalized_value).tz_localize(self.tz) + return Timestamp(normalized_value).tz_localize(own_tz) + + normalized = normalize_i8_timestamps( + np.array([self.value], dtype='i8'), tz=own_tz) + return Timestamp(normalized[0]).tz_localize(own_tz) # Add the min and max fields at the class level
there's a tiny perf bump, but it all comes from doing `own_tz = self.tzinfo` up-front instead of accessing `self.tz` multiple times within `Timestamp.normalize`. With just the cimport, this is actually slightly slower, which is something of a mystery. The main upside is that we now only cimport from libconversion, which im hopeful will make some other circular-imports easier to simplify.the perf improvement
https://api.github.com/repos/pandas-dev/pandas/pulls/34476
2020-05-30T01:15:05Z
2020-05-31T23:01:05Z
2020-05-31T23:01:05Z
2020-05-31T23:06:19Z
BLD/PERF: bump cython to 0.29.19 for searchsorted
diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json index 7c10a2d17775a..0f596e3481953 100644 --- a/asv_bench/asv.conf.json +++ b/asv_bench/asv.conf.json @@ -39,7 +39,7 @@ // followed by the pip installed packages). "matrix": { "numpy": [], - "Cython": ["0.29.16"], + "Cython": ["0.29.19"], "matplotlib": [], "sqlalchemy": [], "scipy": [], diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml index 15704cf0d5427..0ae6b011e1916 100644 --- a/ci/deps/azure-36-32bit.yaml +++ b/ci/deps/azure-36-32bit.yaml @@ -22,5 +22,5 @@ dependencies: # see comment above - pip - pip: - - cython>=0.29.16 + - cython>=0.29.19 - pytest>=5.0.1 diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml index 56da56b45b702..b17147ff0c650 100644 --- a/ci/deps/azure-36-locale.yaml +++ b/ci/deps/azure-36-locale.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.16 + - cython>=0.29.19 - pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml index c086b3651afc3..5c5c68ca8b1d1 100644 --- a/ci/deps/azure-36-locale_slow.yaml +++ b/ci/deps/azure-36-locale_slow.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.16 + - cython>=0.29.19 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-36-minimum_versions.yaml b/ci/deps/azure-36-minimum_versions.yaml index f5af7bcf36189..af2f17faace1c 100644 --- a/ci/deps/azure-36-minimum_versions.yaml +++ b/ci/deps/azure-36-minimum_versions.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.6.1 # tools - - cython=0.29.16 + - cython=0.29.19 - pytest=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 31155ac93931a..a54837be2fb02 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.16 + - cython>=0.29.19 - 
pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml index 5cb58756a6ac1..e05c0eb5497ab 100644 --- a/ci/deps/azure-37-numpydev.yaml +++ b/ci/deps/azure-37-numpydev.yaml @@ -14,7 +14,7 @@ dependencies: - pytz - pip - pip: - - cython==0.29.16 # GH#34014 + - cython==0.29.19 # GH#34014 - "git+git://github.com/dateutil/dateutil.git" - "--extra-index-url https://pypi.anaconda.org/scipy-wheels-nightly/simple" - "--pre" diff --git a/ci/deps/azure-macos-36.yaml b/ci/deps/azure-macos-36.yaml index eeea249a19ca1..717201f626864 100644 --- a/ci/deps/azure-macos-36.yaml +++ b/ci/deps/azure-macos-36.yaml @@ -31,6 +31,6 @@ dependencies: - xlwt - pip - pip: - - cython>=0.29.16 + - cython>=0.29.19 - pyreadstat - pyxlsb diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml index 548660cabaa67..b381569cc6b40 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-36.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.16 + - cython>=0.29.19 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index e491fd57b240b..e08e10b351063 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.16 + - cython>=0.29.19 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index 2968c8f188d49..427ed98f96d4f 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.16 + - cython>=0.29.19 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 @@ -15,7 +15,7 @@ dependencies: # pandas dependencies - beautifulsoup4 - botocore>=1.11 - - cython>=0.29.16 + - cython>=0.29.19 - dask - fastparquet>=0.3.2 - gcsfs diff --git 
a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml index 3fc19f1bca084..5ca00f6e042af 100644 --- a/ci/deps/travis-36-locale.yaml +++ b/ci/deps/travis-36-locale.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.16 + - cython>=0.29.19 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml index df693f0e22c71..ab171f0bcfa8c 100644 --- a/ci/deps/travis-36-slow.yaml +++ b/ci/deps/travis-36-slow.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.16 + - cython>=0.29.19 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/travis-37-arm64.yaml index 5cb53489be225..d84605ea8bedd 100644 --- a/ci/deps/travis-37-arm64.yaml +++ b/ci/deps/travis-37-arm64.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.13 + - cython>=0.29.19 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index 986728d0a4a40..243efa4141d01 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.16 + - cython>=0.29.19 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-38.yaml b/ci/deps/travis-38.yaml index b879c0f81dab2..cd2d962c38f11 100644 --- a/ci/deps/travis-38.yaml +++ b/ci/deps/travis-38.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.8.* # tools - - cython>=0.29.16 + - cython>=0.29.19 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/environment.yml b/environment.yml index b81404094fa4c..bc883a65b9f90 100644 --- a/environment.yml +++ b/environment.yml @@ -12,7 +12,7 @@ dependencies: - asv # building - - cython>=0.29.16 + - cython>=0.29.19 # code checks - black=19.10b0 diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 8fd2f6b476e1c..22239b5a62ec8 100644 
--- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -835,7 +835,10 @@ cdef int64_t[:] _normalize_local(const int64_t[:] stamps, tzinfo tz): dt64_to_dtstruct(stamps[i] + delta, &dts) result[i] = _normalized_stamp(&dts) else: - pos = trans.searchsorted(stamps, side='right') - 1 + # C equivalent to `trans.searchsorted(stamps, side="right") -1 + pos = cnp.PyArray_SearchSorted( + trans, stamps.base, cnp.NPY_SEARCHRIGHT, NULL + ) - 1 for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT diff --git a/pyproject.toml b/pyproject.toml index efeb24edbdeb1..7d0d8bdaf0821 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ requires = [ "setuptools", "wheel", - "Cython>=0.29.16", # Note: sync with setup.py + "Cython>=0.29.19", # Note: sync with setup.py "numpy==1.15.4; python_version=='3.6' and platform_system!='AIX'", "numpy==1.15.4; python_version>='3.7' and platform_system!='AIX'", "numpy==1.16.0; python_version=='3.6' and platform_system=='AIX'", diff --git a/requirements-dev.txt b/requirements-dev.txt index 754ec7ae28748..4a5f0b45435ea 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -5,7 +5,7 @@ numpy>=1.15 python-dateutil>=2.7.3 pytz asv -cython>=0.29.16 +cython>=0.29.19 black==19.10b0 cpplint flake8<3.8.0 diff --git a/setup.py b/setup.py index 63510867f0dd7..49e2ad61935e4 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ def is_platform_mac(): min_numpy_ver = "1.15.4" -min_cython_ver = "0.29.16" # note: sync with pyproject.toml +min_cython_ver = "0.29.19" # note: sync with pyproject.toml try: import Cython
cython 0.29.19 fixed a bug that now allows us to use `cnp.PyArray_SearchSorted` to make `ndarray.searchsorted` into a C call. As a demonstration, this PR makes that change in `normalize_i8_timestamps` and shaves about 12% off of it for the scalar case (the non-scalar case is not noticeably affected) ``` In [4]: ts2 = pd.Timestamp.now("US/Pacific") In [5]: %timeit ts2.normalize() 59.3 µs ± 197 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each) # <-- master 51.8 µs ± 362 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each) # <-- PR ``` Not a particularly big deal on its own, _but_ this makes viable passing `int64_t*` instead of `int64_t[:]`, which in turn makes it so we can skip a wrapping/unwrapping step in the scalar code _and_ ideally share code between Timestamp/DatetimeArray, Period/PeriodArray, Timedelta/TimedeltaArray.
https://api.github.com/repos/pandas-dev/pandas/pulls/34475
2020-05-30T00:56:28Z
2020-06-01T23:03:36Z
null
2020-06-02T18:31:03Z
BUG: fix origin epoch when freq is Day and harmonize epoch between timezones
diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 4a4c9a1d7434b..5df80645c2b5d 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1693,11 +1693,15 @@ def _get_timestamp_range_edges( ------- A tuple of length 2, containing the adjusted pd.Timestamp objects. """ - index_tz = first.tz - if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None): - raise ValueError("The origin must have the same timezone as the index.") - if isinstance(freq, Tick): + index_tz = first.tz + if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None): + raise ValueError("The origin must have the same timezone as the index.") + elif origin == "epoch": + # set the epoch based on the timezone to have similar bins results when + # resampling on the same kind of indexes on different timezones + origin = Timestamp("1970-01-01", tz=index_tz) + if isinstance(freq, Day): # _adjust_dates_anchored assumes 'D' means 24H, but first/last # might contain a DST transition (23H, 24H, or 25H). 
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index fe005801aaa53..9909e554aa14d 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -846,6 +846,34 @@ def test_resample_origin_with_tz(): ts.resample("5min", origin="12/31/1999 23:57:00+03:00").mean() +def test_resample_origin_epoch_with_tz_day_vs_24h(): + # GH 34474 + start, end = "2000-10-01 23:30:00+0500", "2000-12-02 00:30:00+0500" + rng = pd.date_range(start, end, freq="7min") + random_values = np.random.randn(len(rng)) + ts_1 = pd.Series(random_values, index=rng) + + result_1 = ts_1.resample("D", origin="epoch").mean() + result_2 = ts_1.resample("24H", origin="epoch").mean() + tm.assert_series_equal(result_1, result_2) + + # check that we have the same behavior with epoch even if we are not timezone aware + ts_no_tz = ts_1.tz_localize(None) + result_3 = ts_no_tz.resample("D", origin="epoch").mean() + result_4 = ts_no_tz.resample("24H", origin="epoch").mean() + tm.assert_series_equal(result_1, result_3.tz_localize(rng.tz), check_freq=False) + tm.assert_series_equal(result_1, result_4.tz_localize(rng.tz), check_freq=False) + + # check that we have the similar results with two different timezones (+2H and +5H) + start, end = "2000-10-01 23:30:00+0200", "2000-12-02 00:30:00+0200" + rng = pd.date_range(start, end, freq="7min") + ts_2 = pd.Series(random_values, index=rng) + result_5 = ts_2.resample("D", origin="epoch").mean() + result_6 = ts_2.resample("24H", origin="epoch").mean() + tm.assert_series_equal(result_1.tz_localize(None), result_5.tz_localize(None)) + tm.assert_series_equal(result_1.tz_localize(None), result_6.tz_localize(None)) + + def test_resample_origin_with_day_freq_on_dst(): # GH 31809 tz = "America/Chicago"
Follow-up of #31809. The purpose of this PR is to fix the current behavior on master: ```python import pandas as pd import numpy as np start, end = "2000-08-02 23:30:00+0500", "2000-12-02 00:30:00+0500" rng = pd.date_range(start, end, freq="7min") ts = pd.Series(np.random.randn(len(rng)), index=rng) result_1 = ts.resample("D", origin="epoch").count() result_2 = ts.resample("24H", origin="epoch").count() print(f"result_1:\n\n{result_1}\n") print(f"result_2:\n\n{result_2}\n") ``` Outputs on master: ``` result_1: 2000-08-02 00:00:00+05:00 5 2000-08-03 00:00:00+05:00 205 2000-08-04 00:00:00+05:00 206 2000-08-05 00:00:00+05:00 206 2000-08-06 00:00:00+05:00 206 ... 2000-11-28 00:00:00+05:00 206 2000-11-29 00:00:00+05:00 206 2000-11-30 00:00:00+05:00 205 2000-12-01 00:00:00+05:00 206 2000-12-02 00:00:00+05:00 5 Freq: D, Length: 123, dtype: int64 result_2: 2000-08-02 05:00:00+05:00 48 2000-08-03 05:00:00+05:00 205 2000-08-04 05:00:00+05:00 206 2000-08-05 05:00:00+05:00 206 2000-08-06 05:00:00+05:00 205 ... 2000-11-27 05:00:00+05:00 206 2000-11-28 05:00:00+05:00 206 2000-11-29 05:00:00+05:00 206 2000-11-30 05:00:00+05:00 205 2000-12-01 05:00:00+05:00 168 Freq: 24H, Length: 122, dtype: int64 ``` Expected Outputs (with this PR): ``` result_1: 2000-08-02 00:00:00+05:00 5 2000-08-03 00:00:00+05:00 205 2000-08-04 00:00:00+05:00 206 2000-08-05 00:00:00+05:00 206 2000-08-06 00:00:00+05:00 206 ... 2000-11-28 00:00:00+05:00 206 2000-11-29 00:00:00+05:00 206 2000-11-30 00:00:00+05:00 205 2000-12-01 00:00:00+05:00 206 2000-12-02 00:00:00+05:00 5 Freq: D, Length: 123, dtype: int64 result_2: 2000-08-02 00:00:00+05:00 5 2000-08-03 00:00:00+05:00 205 2000-08-04 00:00:00+05:00 206 2000-08-05 00:00:00+05:00 206 2000-08-06 00:00:00+05:00 206 ... 
2000-11-28 00:00:00+05:00 206 2000-11-29 00:00:00+05:00 206 2000-11-30 00:00:00+05:00 205 2000-12-01 00:00:00+05:00 206 2000-12-02 00:00:00+05:00 5 Freq: 24H, Length: 123, dtype: int64 ``` ------ I thought of two possible solutions while fixing that: 1. Consider 'epoch' as a UNIX epoch: `pd.Timestamp(0, tz=index_tz)` 2. Consider that 'epoch' is just an helper to fix the origin: `pd.Timestamp("1970-01-01", tz=index_tz)` To have similar results between timezones, I thought the solution 2 was a better choice. The solution 2 could also help us to set the behavior `origin=epoch` as the default one in a future version since the results would be quite similar with `origin=start_day` (that is not quite the case with a DST timezone, I can provide further details if needed on why this is the case). (The solution 1 is correct in theory but is giving results that could be hard to explain to the end user.) ------ - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry (follow-up PR, not necessary I think)
https://api.github.com/repos/pandas-dev/pandas/pulls/34474
2020-05-30T00:44:46Z
2020-06-01T22:18:00Z
2020-06-01T22:18:00Z
2020-06-01T22:18:07Z
fix to_json for numbers larger than sys.maxsize
diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py index a490e250943f5..ed0fb5b8fe342 100644 --- a/asv_bench/benchmarks/io/json.py +++ b/asv_bench/benchmarks/io/json.py @@ -1,3 +1,5 @@ +import sys + import numpy as np from pandas import DataFrame, concat, date_range, read_json, timedelta_range @@ -82,6 +84,7 @@ def setup(self, orient, frame): timedeltas = timedelta_range(start=1, periods=N, freq="s") datetimes = date_range(start=1, periods=N, freq="s") ints = np.random.randint(100000000, size=N) + longints = sys.maxsize * np.random.randint(100000000, size=N) floats = np.random.randn(N) strings = tm.makeStringIndex(N) self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N)) @@ -120,6 +123,18 @@ def setup(self, orient, frame): index=index, ) + self.df_longint_float_str = DataFrame( + { + "longint_1": longints, + "longint_2": longints, + "float_1": floats, + "float_2": floats, + "str_1": strings, + "str_2": strings, + }, + index=index, + ) + def time_to_json(self, orient, frame): getattr(self, frame).to_json(self.fname, orient=orient) @@ -172,6 +187,7 @@ def setup(self): timedeltas = timedelta_range(start=1, periods=N, freq="s") datetimes = date_range(start=1, periods=N, freq="s") ints = np.random.randint(100000000, size=N) + longints = sys.maxsize * np.random.randint(100000000, size=N) floats = np.random.randn(N) strings = tm.makeStringIndex(N) self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N)) @@ -209,6 +225,17 @@ def setup(self): }, index=index, ) + self.df_longint_float_str = DataFrame( + { + "longint_1": longints, + "longint_2": longints, + "float_1": floats, + "float_2": floats, + "str_1": strings, + "str_2": strings, + }, + index=index, + ) def time_floats_with_int_idex_lines(self): self.df.to_json(self.fname, orient="records", lines=True) @@ -225,6 +252,9 @@ def time_float_int_lines(self): def time_float_int_str_lines(self): self.df_int_float_str.to_json(self.fname, orient="records", lines=True) + def 
time_float_longint_str_lines(self): + self.df_longint_float_str.to_json(self.fname, orient="records", lines=True) + class ToJSONMem: def setup_cache(self): diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 7c9fa53568f45..d6f313b5c3b35 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1020,6 +1020,7 @@ I/O - Bug in :meth:`~pandas.io.stata.StataReader` which resulted in categorical variables with difference dtypes when reading data using an iterator. (:issue:`31544`) - :meth:`HDFStore.keys` has now an optional `include` parameter that allows the retrieval of all native HDF5 table names (:issue:`29916`) - Bug in :meth:`read_excel` for ODS files removes 0.0 values (:issue:`27222`) +- Bug in :meth:`ujson.encode` was raising an `OverflowError` with numbers larger than sys.maxsize (:issue: `34395`) Plotting ^^^^^^^^ diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h index acb66b668e8dc..69284e1c3f2ab 100644 --- a/pandas/_libs/src/ujson/lib/ultrajson.h +++ b/pandas/_libs/src/ujson/lib/ultrajson.h @@ -150,6 +150,7 @@ enum JSTYPES { JT_INT, // (JSINT32 (signed 32-bit)) JT_LONG, // (JSINT64 (signed 64-bit)) JT_DOUBLE, // (double) + JT_BIGNUM, // integer larger than sys.maxsize JT_UTF8, // (char 8-bit) JT_ARRAY, // Array structure JT_OBJECT, // Key/Value structure @@ -187,6 +188,8 @@ typedef struct __JSONObjectEncoder { JSINT64 (*getLongValue)(JSOBJ obj, JSONTypeContext *tc); JSINT32 (*getIntValue)(JSOBJ obj, JSONTypeContext *tc); double (*getDoubleValue)(JSOBJ obj, JSONTypeContext *tc); + const char *(*getBigNumStringValue)(JSOBJ obj, JSONTypeContext *tc, + size_t *_outLen); /* Begin iteration of an iteratable object (JS_ARRAY or JS_OBJECT) diff --git a/pandas/_libs/src/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/ujson/lib/ultrajsonenc.c index 065e3b2c60cf9..51aa39a16920e 100644 --- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c +++ 
b/pandas/_libs/src/ujson/lib/ultrajsonenc.c @@ -1107,6 +1107,35 @@ void encode(JSOBJ obj, JSONObjectEncoder *enc, const char *name, Buffer_AppendCharUnchecked(enc, '\"'); break; } + + case JT_BIGNUM: { + value = enc->getBigNumStringValue(obj, &tc, &szlen); + + Buffer_Reserve(enc, RESERVE_STRING(szlen)); + if (enc->errorMsg) { + enc->endTypeContext(obj, &tc); + return; + } + + if (enc->forceASCII) { + if (!Buffer_EscapeStringValidated(obj, enc, value, + value + szlen)) { + enc->endTypeContext(obj, &tc); + enc->level--; + return; + } + } else { + if (!Buffer_EscapeStringUnvalidated(enc, value, + value + szlen)) { + enc->endTypeContext(obj, &tc); + enc->level--; + return; + } + } + + break; + + } } enc->endTypeContext(obj, &tc); diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index c71e941f7d6e8..1de9642761961 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -1629,15 +1629,20 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { if (PyLong_Check(obj)) { PRINTMARK(); tc->type = JT_LONG; - GET_TC(tc)->longValue = PyLong_AsLongLong(obj); + int overflow = 0; + GET_TC(tc)->longValue = PyLong_AsLongLongAndOverflow(obj, &overflow); + int err; + err = (GET_TC(tc)->longValue == -1) && PyErr_Occurred(); - exc = PyErr_Occurred(); - - if (exc && PyErr_ExceptionMatches(PyExc_OverflowError)) { + if (overflow){ + PRINTMARK(); + tc->type = JT_BIGNUM; + } + else if (err) { PRINTMARK(); goto INVALID; } - + return; } else if (PyFloat_Check(obj)) { PRINTMARK(); @@ -2105,7 +2110,6 @@ void Object_endTypeContext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { NpyArr_freeLabels(GET_TC(tc)->columnLabels, GET_TC(tc)->columnLabelsLen); GET_TC(tc)->columnLabels = NULL; - PyObject_Free(GET_TC(tc)->cStr); GET_TC(tc)->cStr = NULL; PyObject_Free(tc->prv); @@ -2126,6 +2130,19 @@ double Object_getDoubleValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->doubleValue; } 
+const char *Object_getBigNumStringValue(JSOBJ obj, JSONTypeContext *tc, + size_t *_outLen) { + PyObject* repr = PyObject_Str(obj); + const char *str = PyUnicode_AsUTF8AndSize(repr, (Py_ssize_t *) _outLen); + char* bytes = PyObject_Malloc(*_outLen + 1); + memcpy(bytes, str, *_outLen + 1); + GET_TC(tc)->cStr = bytes; + + Py_DECREF(repr); + + return GET_TC(tc)->cStr; +} + static void Object_releaseObject(JSOBJ _obj) { Py_DECREF((PyObject *)_obj); } void Object_iterBegin(JSOBJ obj, JSONTypeContext *tc) { @@ -2181,6 +2198,7 @@ PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args, Object_getLongValue, NULL, // getIntValue is unused Object_getDoubleValue, + Object_getBigNumStringValue, Object_iterBegin, Object_iterNext, Object_iterEnd, @@ -2294,7 +2312,6 @@ PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args, if (ret != buffer) { encoder->free(ret); } - PyErr_Format(PyExc_OverflowError, "%s", encoder->errorMsg); return NULL; } diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 8578b31fbb81e..10f49b9b81528 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -4,6 +4,7 @@ from io import StringIO import json import os +import sys import numpy as np import pytest @@ -1242,6 +1243,29 @@ def test_read_jsonl_unicode_chars(self): expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"]) tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)]) + def test_to_json_large_numbers(self, bigNum): + # GH34473 + series = Series(bigNum, dtype=object, index=["articleId"]) + json = series.to_json() + expected = '{"articleId":' + str(bigNum) + "}" + assert json == expected + # GH 20599 + with pytest.raises(ValueError): + json = StringIO(json) + result = read_json(json) + tm.assert_series_equal(series, result) + + df = DataFrame(bigNum, dtype=object, index=["articleId"], columns=[0]) + json = df.to_json() + 
expected = '{"0":{"articleId":' + str(bigNum) + "}}" + assert json == expected + # GH 20599 + with pytest.raises(ValueError): + json = StringIO(json) + result = read_json(json) + tm.assert_frame_equal(df, result) + def test_read_json_large_numbers(self): # GH18842 json = '{"articleId": "1404366058080022500245"}' diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 7dc73d5be1538..e1a136e1a3728 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -5,6 +5,7 @@ import locale import math import re +import sys import time import dateutil @@ -559,6 +560,17 @@ def test_encode_long_conversion(self): assert output == json.dumps(long_input) assert long_input == ujson.decode(output) + @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)]) + def test_dumps_ints_larger_than_maxsize(self, bigNum): + # GH34395 + bigNum = sys.maxsize + 1 + encoding = ujson.encode(bigNum) + assert str(bigNum) == encoding + + # GH20599 + with pytest.raises(ValueError): + assert ujson.loads(encoding) == bigNum + @pytest.mark.parametrize( "int_exp", ["1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"] ) @@ -570,18 +582,6 @@ def test_loads_non_str_bytes_raises(self): with pytest.raises(TypeError, match=msg): ujson.loads(None) - def test_encode_numeric_overflow(self): - with pytest.raises(OverflowError): - ujson.encode(12839128391289382193812939) - - def test_encode_numeric_overflow_nested(self): - class Nested: - x = 12839128391289382193812939 - - for _ in range(0, 100): - with pytest.raises(OverflowError): - ujson.encode(Nested()) - @pytest.mark.parametrize("val", [3590016419, 2 ** 31, 2 ** 32, (2 ** 32) - 1]) def test_decode_number_with_32bit_sign_bit(self, val): # Test that numbers that fit within 32 bits but would have the
- [x] closes #34395 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] passes performance benchmarks - [x] whatsnew entry Currently this patch causes a significant reduction for a number of the JSON performance benchmarks. A printout of my results is included below. [json_benchmarks_results.txt](https://github.com/pandas-dev/pandas/files/4704831/json_benchmarks_results.txt) I'd love to keep working on this if anybody has ideas for making the solution more efficient!
https://api.github.com/repos/pandas-dev/pandas/pulls/34473
2020-05-30T00:40:35Z
2020-06-24T23:44:41Z
2020-06-24T23:44:41Z
2020-06-26T17:51:37Z
BUG: Fix read_json shape error on multi-indexed DF/SR with orient='split' #4889
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 2243790a663df..bed471397a46a 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -954,6 +954,7 @@ I/O - Bug in :meth:`~DataFrame.to_excel` could not handle the column name `render` and was raising an ``KeyError`` (:issue:`34331`) - Bug in :meth:`~SQLDatabase.execute` was raising a ``ProgrammingError`` for some DB-API drivers when the SQL statement contained the `%` character and no parameters were present (:issue:`34211`) - Bug in :meth:`~pandas.io.stata.StataReader` which resulted in categorical variables with difference dtypes when reading data using an iterator. (:issue:`31544`) +- Bug in meth:`read_json` was raising shape error on multi-indexed Dataframe/Series with orient='split' (:issue:`4889`) Plotting ^^^^^^^^ diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index b973553a767ba..004e7d5515285 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -1030,6 +1030,8 @@ def _parse_no_numpy(self): if self.orient == "split": decoded = {str(k): v for k, v in data.items()} self.check_keys_split(decoded) + if "index" in decoded: + decoded["index"] = np.transpose(decoded["index"]).tolist() self.obj = create_series_with_explicit_dtype(**decoded) else: self.obj = create_series_with_explicit_dtype(data, dtype_if_empty=object) @@ -1121,6 +1123,8 @@ def _parse_no_numpy(self): for k, v in loads(json, precise_float=self.precise_float).items() } self.check_keys_split(decoded) + if "index" in decoded: + decoded["index"] = np.transpose(decoded["index"]).tolist() self.obj = DataFrame(dtype=None, **decoded) elif orient == "index": self.obj = DataFrame.from_dict( diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 137e4c991d080..0d0a2cdb5c818 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1394,6 +1394,22 @@ def test_index_false_to_json_split(self, 
data, expected): assert result == expected + def test_read_json_frame_mutltiindex_split(self): + index = pd.MultiIndex.from_tuples([(1, 1), (2, 1), (1, 2), (2, 2)]) + expected = pd.DataFrame( + data=[[1, 1], [2, 2], [3, 3], [4, 4]], columns=["A", "B"], index=index + ) + js = expected.to_json(orient="split") + result = pd.read_json(js, orient="split") + tm.assert_frame_equal(expected, result) + + def test_read_json_series_mutltiindex_split(self): + index = pd.MultiIndex.from_tuples([(1, 1), (2, 1), (1, 2), (2, 2)]) + expected = pd.Series(data=[1, 2, 3, 4], index=index) + js = expected.to_json(orient="split") + result = pd.read_json(js, orient="split", typ="series") + tm.assert_series_equal(expected, result) + @pytest.mark.parametrize( "data", [
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34471
2020-05-29T23:26:51Z
2020-11-06T16:54:12Z
null
2020-11-06T16:54:12Z
TYP: annotations in plotting code
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 1358ddf7005a3..b8be8a66a59fd 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -113,7 +113,7 @@ def deregister(): units.registry[unit] = formatter -def _to_ordinalf(tm): +def _to_ordinalf(tm: pydt.time) -> float: tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + float(tm.microsecond / 1e6) return tot_sec @@ -160,7 +160,7 @@ class TimeFormatter(Formatter): def __init__(self, locs): self.locs = locs - def __call__(self, x, pos=0): + def __call__(self, x, pos=0) -> str: """ Return the time of day as a formatted string. @@ -1049,7 +1049,7 @@ def set_locs(self, locs): (vmin, vmax) = (vmax, vmin) self._set_default_format(vmin, vmax) - def __call__(self, x, pos=0): + def __call__(self, x, pos=0) -> str: if self.formatdict is None: return "" @@ -1066,7 +1066,7 @@ class TimeSeries_TimedeltaFormatter(Formatter): """ @staticmethod - def format_timedelta_ticks(x, pos, n_decimals): + def format_timedelta_ticks(x, pos, n_decimals: int) -> str: """ Convert seconds to 'D days HH:MM:SS.F' """ @@ -1082,7 +1082,7 @@ def format_timedelta_ticks(x, pos, n_decimals): s = f"{int(d):d} days {s}" return s - def __call__(self, x, pos=0): + def __call__(self, x, pos=0) -> str: (vmin, vmax) = tuple(self.axis.get_view_interval()) n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin)))) if n_decimals > 9: diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index e73a109449d62..631760c547985 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -1,7 +1,7 @@ # TODO: Use the fact that axis can have units to simplify the process import functools -from typing import Optional +from typing import TYPE_CHECKING, Optional import numpy as np @@ -20,15 +20,23 @@ TimeSeries_DateLocator, TimeSeries_TimedeltaFormatter, ) -import 
pandas.tseries.frequencies as frequencies -from pandas.tseries.frequencies import is_subperiod, is_superperiod +from pandas.tseries.frequencies import ( + get_period_alias, + is_subperiod, + is_superperiod, + to_offset, +) from pandas.tseries.offsets import DateOffset +if TYPE_CHECKING: + from pandas import Series, Index # noqa:F401 + + # --------------------------------------------------------------------- # Plotting functions and monkey patches -def _maybe_resample(series, ax, kwargs): +def _maybe_resample(series: "Series", ax, kwargs): # resample against axes freq if necessary freq, ax_freq = _get_freq(ax, series) @@ -42,7 +50,7 @@ def _maybe_resample(series, ax, kwargs): if ax_freq is not None and freq != ax_freq: if is_superperiod(freq, ax_freq): # upsample input series = series.copy() - series.index = series.index.asfreq(ax_freq, how="s") + series.index = series.index.asfreq(ax_freq, how="s") # type: ignore freq = ax_freq elif _is_sup(freq, ax_freq): # one is weekly how = kwargs.pop("how", "last") @@ -161,21 +169,22 @@ def _get_ax_freq(ax): return ax_freq -def get_period_alias(freq) -> Optional[str]: +def _get_period_alias(freq) -> Optional[str]: if isinstance(freq, DateOffset): freq = freq.rule_code else: freq = base_and_stride(freq)[0] - freq = frequencies.get_period_alias(freq) + freq = get_period_alias(freq) return freq -def _get_freq(ax, series): +def _get_freq(ax, series: "Series"): # get frequency from data freq = getattr(series.index, "freq", None) if freq is None: freq = getattr(series.index, "inferred_freq", None) + freq = to_offset(freq) ax_freq = _get_ax_freq(ax) @@ -184,12 +193,12 @@ def _get_freq(ax, series): freq = ax_freq # get the period frequency - freq = get_period_alias(freq) + freq = _get_period_alias(freq) return freq, ax_freq def _use_dynamic_x(ax, data): - freq = _get_index_freq(data) + freq = _get_index_freq(data.index) ax_freq = _get_ax_freq(ax) if freq is None: # convert irregular if axes has freq info @@ -201,7 +210,7 @@ def 
_use_dynamic_x(ax, data): if freq is None: return False - freq = get_period_alias(freq) + freq = _get_period_alias(freq) if freq is None: return False @@ -216,14 +225,16 @@ def _use_dynamic_x(ax, data): return True -def _get_index_freq(data): - freq = getattr(data.index, "freq", None) +def _get_index_freq(index: "Index") -> Optional[DateOffset]: + freq = getattr(index, "freq", None) if freq is None: - freq = getattr(data.index, "inferred_freq", None) + freq = getattr(index, "inferred_freq", None) if freq == "B": - weekdays = np.unique(data.index.dayofweek) + weekdays = np.unique(index.dayofweek) # type: ignore if (5 in weekdays) or (6 in weekdays): freq = None + + freq = to_offset(freq) return freq @@ -231,10 +242,12 @@ def _maybe_convert_index(ax, data): # tsplot converts automatically, but don't want to convert index # over and over for DataFrames if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)): - freq = getattr(data.index, "freq", None) + freq = data.index.freq if freq is None: - freq = getattr(data.index, "inferred_freq", None) + # We only get here for DatetimeIndex + freq = data.index.inferred_freq + freq = to_offset(freq) if freq is None: freq = _get_ax_freq(ax) @@ -242,7 +255,7 @@ def _maybe_convert_index(ax, data): if freq is None: raise ValueError("Could not get frequency alias for plotting") - freq = get_period_alias(freq) + freq = _get_period_alias(freq) if isinstance(data.index, ABCDatetimeIndex): data = data.tz_localize(None).to_period(freq=freq)
Trying to sort out what is getting passed to the tseries.frequencies funcs in these modules.
https://api.github.com/repos/pandas-dev/pandas/pulls/34469
2020-05-29T22:46:13Z
2020-05-31T23:02:31Z
2020-05-31T23:02:31Z
2020-05-31T23:07:50Z
BUG: Index with right_index=True or left_index=True merge
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 3b755c40721fb..17beb17219c2a 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -438,8 +438,8 @@ def merge_asof( >>> pd.merge_asof(left, right, left_index=True, right_index=True) left_val right_val 1 a 1 - 5 b 3 - 10 c 7 + 3 b 3 + 7 c 7 Here is a real-world times-series example @@ -906,11 +906,10 @@ def _get_join_info(self): if self.right_index: if len(self.left) > 0: join_index = self._create_join_index( - self.left.index, self.right.index, - left_indexer, + self.left.index, right_indexer, - how="right", + how="left", ) else: join_index = self.right.index.take(right_indexer) @@ -918,11 +917,10 @@ def _get_join_info(self): elif self.left_index: if len(self.right) > 0: join_index = self._create_join_index( - self.right.index, self.left.index, - right_indexer, + self.right.index, left_indexer, - how="left", + how="right", ) else: join_index = self.left.index.take(left_indexer) @@ -939,7 +937,6 @@ def _create_join_index( index: Index, other_index: Index, indexer, - other_indexer, how: str = "left", ): """ diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index f44909b61ff7a..7cb6341a8a770 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -15,6 +15,7 @@ DataFrame, DatetimeIndex, Float64Index, + Index, Int64Index, IntervalIndex, MultiIndex, @@ -360,7 +361,9 @@ def test_handle_join_key_pass_array(self): key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64) merged = merge(left, right, left_index=True, right_on=key, how="outer") - tm.assert_series_equal(merged["key_0"], Series(key, name="key_0")) + tm.assert_series_equal( + merged["key_0"], Series(key, name="key_0", index=[0, 1, 1, 2, 2, np.nan]) + ) def test_no_overlap_more_informative_error(self): dt = datetime.now() @@ -435,7 +438,7 @@ def test_merge_left_empty_right_empty(self, join_type, kwarg): exp_in = DataFrame( 
columns=["a", "b", "c", "x", "y", "z"], - index=pd.Index([], dtype=object), + index=Index([], dtype=object), dtype=object, ) @@ -471,7 +474,10 @@ def check1(exp, kwarg): def check2(exp, kwarg): result = pd.merge(left, right, how="right", **kwarg) tm.assert_frame_equal(result, exp) + + def check3(exp, kwarg, index): result = pd.merge(left, right, how="outer", **kwarg) + exp.index = index tm.assert_frame_equal(result, exp) for kwarg in [ @@ -481,6 +487,13 @@ def check2(exp, kwarg): check1(exp_in, kwarg) check2(exp_out, kwarg) + check3(exp_out, dict(left_index=True, right_index=True), exp_out.index) + check3( + exp_out.copy(), + dict(left_index=True, right_on="x"), + Index([np.nan, np.nan, np.nan]), + ) + kwarg = dict(left_on="a", right_index=True) check1(exp_in, kwarg) exp_out["a"] = [0, 1, 2] @@ -1311,7 +1324,7 @@ def test_merge_on_index_with_more_values(self, how, index, expected_index): ], columns=["a", "key", "b"], ) - expected.set_index(expected_index, inplace=True) + expected.set_index(df2.index, inplace=True) tm.assert_frame_equal(result, expected) def test_merge_right_index_right(self): @@ -1324,7 +1337,7 @@ def test_merge_right_index_right(self): expected = DataFrame( {"a": [1, 2, 3, None], "key": [0, 1, 1, 2], "b": [1, 2, 2, 3]}, columns=["a", "key", "b"], - index=[0, 1, 2, np.nan], + index=[0, 1, 1, 2], ) result = left.merge(right, left_on="key", right_index=True, how="right") tm.assert_frame_equal(result, expected) @@ -1357,7 +1370,7 @@ def test_merge_take_missing_values_from_index_of_other_dtype(self): "key": Categorical(["a", "a", "b", "c"]), "b": [1, 1, 2, 3], }, - index=[0, 1, 2, np.nan], + index=Categorical(["a", "a", "b", "c"]), ) expected = expected.reindex(columns=["a", "key", "b"]) tm.assert_frame_equal(result, expected) @@ -2349,3 +2362,16 @@ def test_merge_join_cols_error_reporting_on_and_index(func, kwargs): ) with pytest.raises(MergeError, match=msg): getattr(pd, func)(left, right, on="a", **kwargs) + + +@pytest.mark.parametrize( + ("index", 
"how", "values"), + [(Index(["a", "b"]), "left", ["a", "b"]), (Index([0, 1]), "right", ["a", "c"])], +) +def test_left_index_true_left_and_righ_join_target_index(index, how, values): + left = DataFrame(index=["a", "b"]) + right = DataFrame({"x": ["a", "c"]}) + + result = merge(left, right, left_index=True, right_on="x", how=how) + expected = DataFrame({"x": values}, index=index) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/merge/test_merge_index_as_string.py b/pandas/tests/reshape/merge/test_merge_index_as_string.py index d20d93370ec7e..651dd93c95001 100644 --- a/pandas/tests/reshape/merge/test_merge_index_as_string.py +++ b/pandas/tests/reshape/merge/test_merge_index_as_string.py @@ -185,5 +185,8 @@ def test_join_indexes_and_columns_on(df1, df2, left_index, join_type): result = left_df.join( right_df, on=["outer", "inner"], how=join_type, lsuffix="_x", rsuffix="_y" ) - + if join_type == "right" and left_index == "inner": + result.index = result.index.droplevel("outer") + if join_type == "outer" and left_index == "inner": + result.index = result.index.droplevel(0) tm.assert_frame_equal(result, expected, check_like=True)
- [x] xref #28243 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Hi, I would expect the output form the merge function as specified in #28243, i.e. if you join right with right_index=True the resulting DataFrame should have the right index. The documentation says, that the index is passed on if it is part of the merge. This change broke a few test, which expected in this case that the index was the index which was merged onto the base DataFrame. I fixed them temporarily until I know how to proceed in this case. Should this as implemented be the desired behavior for the right_index=True case? The code is not final, it is just a first draft. I would have to change the tests for good after we decide how to implement this.
https://api.github.com/repos/pandas-dev/pandas/pulls/34468
2020-05-29T22:27:24Z
2021-02-11T01:38:13Z
null
2021-04-26T21:59:56Z
CLN: drop **kwds from pd.read_excel
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 88bf0e005a221..f320ad97d3dfa 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -394,6 +394,8 @@ Backwards incompatible API changes - :meth:`Series.to_timestamp` now raises a ``TypeError`` if the axis is not a :class:`PeriodIndex`. Previously an ``AttributeError`` was raised (:issue:`33327`) - :meth:`Series.to_period` now raises a ``TypeError`` if the axis is not a :class:`DatetimeIndex`. Previously an ``AttributeError`` was raised (:issue:`33327`) - :func: `pandas.api.dtypes.is_string_dtype` no longer incorrectly identifies categorical series as string. +- :func:`read_excel` no longer takes ``**kwds`` arguments. This means that passing in keyword ``chunksize`` now raises a ``TypeError`` + (previously raised a ``NotImplementedError``), while passing in keyword ``encoding`` now raises a ``TypeError`` (:issue:`34464`) ``MultiIndex.get_indexer`` interprets `method` argument differently ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index d55bdffe689f2..12019c7477fe0 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -193,8 +193,6 @@ Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than 'X'...'X'. Passing in False will cause data to be overwritten if there are duplicate names in the columns. -**kwds : optional - Optional keyword arguments can be passed to ``TextFileReader``. 
Returns ------- @@ -285,6 +283,7 @@ def read_excel( nrows=None, na_values=None, keep_default_na=True, + na_filter=True, verbose=False, parse_dates=False, date_parser=None, @@ -293,13 +292,8 @@ def read_excel( skipfooter=0, convert_float=True, mangle_dupe_cols=True, - **kwds, ): - for arg in ("sheet", "sheetname", "parse_cols"): - if arg in kwds: - raise TypeError(f"read_excel() got an unexpected keyword argument `{arg}`") - if not isinstance(io, ExcelFile): io = ExcelFile(io, engine=engine) elif engine and engine != io.engine: @@ -323,6 +317,7 @@ def read_excel( nrows=nrows, na_values=na_values, keep_default_na=keep_default_na, + na_filter=na_filter, verbose=verbose, parse_dates=parse_dates, date_parser=date_parser, @@ -331,7 +326,6 @@ def read_excel( skipfooter=skipfooter, convert_float=convert_float, mangle_dupe_cols=mangle_dupe_cols, - **kwds, ) @@ -861,11 +855,6 @@ def parse( DataFrame or dict of DataFrames DataFrame from the passed in Excel file. """ - if "chunksize" in kwds: - raise NotImplementedError( - "chunksize keyword of read_excel is not implemented" - ) - return self._reader.parse( sheet_name=sheet_name, header=header, diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index fd1533dd65dc4..109da630f76a2 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -897,12 +897,6 @@ def test_read_excel_bool_header_arg(self, read_ext): with pytest.raises(TypeError, match=msg): pd.read_excel("test1" + read_ext, header=arg) - def test_read_excel_chunksize(self, read_ext): - # GH 8011 - msg = "chunksize keyword of read_excel is not implemented" - with pytest.raises(NotImplementedError, match=msg): - pd.read_excel("test1" + read_ext, chunksize=100) - def test_read_excel_skiprows_list(self, read_ext): # GH 4903 if pd.read_excel.keywords["engine"] == "pyxlsb": @@ -1048,17 +1042,6 @@ def test_excel_passes_na_filter(self, read_ext, na_filter): expected = DataFrame(expected, 
columns=["Test"]) tm.assert_frame_equal(parsed, expected) - @pytest.mark.parametrize("arg", ["sheet", "sheetname", "parse_cols"]) - @td.check_file_leaks - def test_unexpected_kwargs_raises(self, read_ext, arg): - # gh-17964 - kwarg = {arg: "Sheet1"} - msg = fr"unexpected keyword argument `{arg}`" - - with pd.ExcelFile("test1" + read_ext) as excel: - with pytest.raises(TypeError, match=msg): - pd.read_excel(excel, **kwarg) - def test_excel_table_sheet_by_index(self, read_ext, df_ref): # For some reason pd.read_excel has no attribute 'keywords' here. # Skipping based on read_ext instead. diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index b909f1f3a958f..ba759c7766fa5 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -836,9 +836,7 @@ def test_to_excel_output_encoding(self, ext): with tm.ensure_clean("__tmp_to_excel_float_format__." + ext) as filename: df.to_excel(filename, sheet_name="TestSheet", encoding="utf8") - result = pd.read_excel( - filename, sheet_name="TestSheet", encoding="utf8", index_col=0 - ) + result = pd.read_excel(filename, sheet_name="TestSheet", index_col=0) tm.assert_frame_equal(result, df) def test_to_excel_unicode_filename(self, ext, path):
Drop ``**kwds`` from ``pd.read_excel``. Wrt. "new" keyword parameter ``na_filter``: This parameter already exists in the doc string and is just masked in the ``kwds`` in the signature. If you follow the code paths all the way down to parsers.py, you can see that it indeed has a default of `True``.
https://api.github.com/repos/pandas-dev/pandas/pulls/34464
2020-05-29T21:02:10Z
2020-06-01T15:37:10Z
2020-06-01T15:37:10Z
2020-06-01T16:06:50Z
REF: make Resolution an enum
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index b804ed883e693..7f7dd62540387 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -3598,9 +3598,10 @@ cpdef to_offset(freq): if not stride: stride = 1 - from .resolution import Resolution # TODO: avoid runtime import + # TODO: avoid runtime import + from .resolution import Resolution, reso_str_bump_map - if prefix in Resolution.reso_str_bump_map: + if prefix in reso_str_bump_map: stride, name = Resolution.get_stride_from_decimal( float(stride), prefix ) diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 2133573ee7554..b3fc1e32f68e8 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -1,3 +1,5 @@ +from enum import Enum + import numpy as np from numpy cimport ndarray, int64_t, int32_t @@ -25,10 +27,46 @@ cdef: int RESO_HR = 5 int RESO_DAY = 6 +reso_str_bump_map = { + "D": "H", + "H": "T", + "T": "S", + "S": "L", + "L": "U", + "U": "N", + "N": None, +} + +_abbrev_to_attrnames = {v: k for k, v in attrname_to_abbrevs.items()} + +_reso_str_map = { + RESO_NS: "nanosecond", + RESO_US: "microsecond", + RESO_MS: "millisecond", + RESO_SEC: "second", + RESO_MIN: "minute", + RESO_HR: "hour", + RESO_DAY: "day", +} + +_str_reso_map = {v: k for k, v in _reso_str_map.items()} + +# factor to multiply a value by to convert it to the next finer grained +# resolution +_reso_mult_map = { + RESO_NS: None, + RESO_US: 1000, + RESO_MS: 1000, + RESO_SEC: 1000, + RESO_MIN: 60, + RESO_HR: 60, + RESO_DAY: 24, +} # ---------------------------------------------------------------------- -def resolution(const int64_t[:] stamps, tz=None): + +def get_resolution(const int64_t[:] stamps, tz=None): cdef: Py_ssize_t i, n = len(stamps) npy_datetimestruct dts @@ -82,7 +120,7 @@ def resolution(const int64_t[:] stamps, tz=None): if curr_reso < reso: reso = curr_reso - return reso + return Resolution(reso) cdef 
inline int _reso_stamp(npy_datetimestruct *dts): @@ -99,7 +137,7 @@ cdef inline int _reso_stamp(npy_datetimestruct *dts): return RESO_DAY -class Resolution: +class Resolution(Enum): # Note: cython won't allow us to reference the cdef versions at the # module level @@ -111,41 +149,14 @@ class Resolution: RESO_HR = 5 RESO_DAY = 6 - _reso_str_map = { - RESO_NS: 'nanosecond', - RESO_US: 'microsecond', - RESO_MS: 'millisecond', - RESO_SEC: 'second', - RESO_MIN: 'minute', - RESO_HR: 'hour', - RESO_DAY: 'day'} - - # factor to multiply a value by to convert it to the next finer grained - # resolution - _reso_mult_map = { - RESO_NS: None, - RESO_US: 1000, - RESO_MS: 1000, - RESO_SEC: 1000, - RESO_MIN: 60, - RESO_HR: 60, - RESO_DAY: 24} - - reso_str_bump_map = { - 'D': 'H', - 'H': 'T', - 'T': 'S', - 'S': 'L', - 'L': 'U', - 'U': 'N', - 'N': None} - - _str_reso_map = {v: k for k, v in _reso_str_map.items()} - - _freq_reso_map = {v: k for k, v in attrname_to_abbrevs.items()} + def __lt__(self, other): + return self.value < other.value + + def __ge__(self, other): + return self.value >= other.value @classmethod - def get_str(cls, reso: int) -> str: + def get_str(cls, reso: "Resolution") -> str: """ Return resolution str against resolution code. @@ -154,10 +165,10 @@ class Resolution: >>> Resolution.get_str(Resolution.RESO_SEC) 'second' """ - return cls._reso_str_map.get(reso, 'day') + return _reso_str_map[reso.value] @classmethod - def get_reso(cls, resostr: str) -> int: + def get_reso(cls, resostr: str) -> "Resolution": """ Return resolution str against resolution code. @@ -169,25 +180,27 @@ class Resolution: >>> Resolution.get_reso('second') == Resolution.RESO_SEC True """ - return cls._str_reso_map.get(resostr, cls.RESO_DAY) + return cls(_str_reso_map[resostr]) @classmethod - def get_str_from_freq(cls, freq: str) -> str: + def get_attrname_from_abbrev(cls, freq: str) -> str: """ Return resolution str against frequency str. 
Examples -------- - >>> Resolution.get_str_from_freq('H') + >>> Resolution.get_attrname_from_abbrev('H') 'hour' """ - return cls._freq_reso_map.get(freq, 'day') + return _abbrev_to_attrnames[freq] @classmethod - def get_reso_from_freq(cls, freq: str) -> int: + def get_reso_from_freq(cls, freq: str) -> "Resolution": """ Return resolution code against frequency str. + `freq` is given by the `offset.freqstr` for some DateOffset object. + Examples -------- >>> Resolution.get_reso_from_freq('H') @@ -196,16 +209,16 @@ class Resolution: >>> Resolution.get_reso_from_freq('H') == Resolution.RESO_HR True """ - return cls.get_reso(cls.get_str_from_freq(freq)) + return cls.get_reso(cls.get_attrname_from_abbrev(freq)) @classmethod - def get_stride_from_decimal(cls, value, freq): + def get_stride_from_decimal(cls, value: float, freq: str): """ Convert freq with decimal stride into a higher freq with integer stride Parameters ---------- - value : int or float + value : float freq : str Frequency string @@ -229,13 +242,13 @@ class Resolution: return int(value), freq else: start_reso = cls.get_reso_from_freq(freq) - if start_reso == 0: + if start_reso.value == 0: raise ValueError( "Could not convert to integer offset at any resolution" ) - next_value = cls._reso_mult_map[start_reso] * value - next_name = cls.reso_str_bump_map[freq] + next_value = _reso_mult_map[start_reso.value] * value + next_name = reso_str_bump_map[freq] return cls.get_stride_from_decimal(next_value, next_name) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index cf3cde155a3bb..b9f712e4d64fe 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1,6 +1,6 @@ from datetime import datetime, timedelta import operator -from typing import Any, Callable, Sequence, Tuple, Type, TypeVar, Union, cast +from typing import Any, Callable, Optional, Sequence, Tuple, Type, TypeVar, Union, cast import warnings import numpy as np @@ -804,7 +804,7 @@ def 
_validate_scalar(self, value, msg: str, cast_str: bool = False): return value def _validate_listlike( - self, value, opname: str, cast_str: bool = False, allow_object: bool = False, + self, value, opname: str, cast_str: bool = False, allow_object: bool = False ): if isinstance(value, type(self)): return value @@ -1103,14 +1103,22 @@ def inferred_freq(self): return None @property # NB: override with cache_readonly in immutable subclasses - def _resolution(self): - return Resolution.get_reso_from_freq(self.freqstr) + def _resolution(self) -> Optional[Resolution]: + try: + return Resolution.get_reso_from_freq(self.freqstr) + except KeyError: + return None @property # NB: override with cache_readonly in immutable subclasses def resolution(self) -> str: """ Returns day, hour, minute, second, millisecond or microsecond """ + if self._resolution is None: + if is_period_dtype(self.dtype): + # somewhere in the past it was decided we default to day + return "day" + # otherwise we fall through and will raise return Resolution.get_str(self._resolution) @classmethod diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 894a519cb693e..4e31477571a5f 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -538,8 +538,8 @@ def is_normalized(self): return conversion.is_date_array_normalized(self.asi8, self.tz) @property # NB: override with cache_readonly in immutable subclasses - def _resolution(self): - return libresolution.resolution(self.asi8, self.tz) + def _resolution(self) -> libresolution.Resolution: + return libresolution.get_resolution(self.asi8, self.tz) # ---------------------------------------------------------------- # Array-Like / EA-Interface Methods diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py index 1c51ad0c45238..51554854378ea 100644 --- a/pandas/tests/tseries/frequencies/test_freq_code.py +++ 
b/pandas/tests/tseries/frequencies/test_freq_code.py @@ -104,13 +104,13 @@ def test_get_to_timestamp_base(freqstr, exp_freqstr): ("N", "nanosecond"), ], ) -def test_get_str_from_freq(freqstr, expected): - assert _reso.get_str_from_freq(freqstr) == expected +def test_get_attrname_from_abbrev(freqstr, expected): + assert _reso.get_attrname_from_abbrev(freqstr) == expected @pytest.mark.parametrize("freq", ["A", "Q", "M", "D", "H", "T", "S", "L", "U", "N"]) def test_get_freq_roundtrip(freq): - result = _attrname_to_abbrevs[_reso.get_str_from_freq(freq)] + result = _attrname_to_abbrevs[_reso.get_attrname_from_abbrev(freq)] assert freq == result
I find this much clearer than just using an int. No measured performance impact.
https://api.github.com/repos/pandas-dev/pandas/pulls/34462
2020-05-29T17:41:57Z
2020-05-31T23:22:37Z
2020-05-31T23:22:37Z
2020-06-01T00:02:06Z
mask based multi-index assignment of column values described
diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index 6db757e726792..6843dd1eadc81 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -1866,29 +1866,39 @@ A chained assignment can also crop up in setting in a mixed dtype frame. These setting rules apply to all of ``.loc/.iloc``. -This is the correct access method: +The following is the recommended access method using ``.loc`` for multiple items (using ``mask``) and a single item using a fixed index: .. ipython:: python - dfc = pd.DataFrame({'A': ['aaa', 'bbb', 'ccc'], 'B': [1, 2, 3]}) - dfc.loc[0, 'A'] = 11 - dfc + dfc = pd.DataFrame({'a': ['one', 'one', 'two', + 'three', 'two', 'one', 'six'], + 'c': np.arange(7)}) + dfd = dfc.copy() + # Setting multiple items using a mask + mask = dfd['a'].str.startswith('o') + dfd.loc[mask, 'c'] = 42 + dfd + + # Setting a single item + dfd = dfc.copy() + dfd.loc[2, 'a'] = 11 + dfd -This *can* work at times, but it is not guaranteed to, and therefore should be avoided: +The following *can* work at times, but it is not guaranteed to, and therefore should be avoided: .. ipython:: python :okwarning: - dfc = dfc.copy() - dfc['A'][0] = 111 - dfc + dfd = dfc.copy() + dfd['a'][2] = 111 + dfd -This will **not** work at all, and so should be avoided: +Last, the subsequent example will **not** work at all, and so should be avoided: :: >>> pd.set_option('mode.chained_assignment','raise') - >>> dfc.loc[0]['A'] = 1111 + >>> dfd.loc[0]['a'] = 1111 Traceback (most recent call last) ... SettingWithCopyException:
- [x] closes #34383 - [x] passes `black pandas` not sure what this means, will look it up. - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - ~~[ ] tests added / passed~~ this is an PR on documentation only, unit tests are not needed. - ~~[ ] whatsnew entry~~ I think I don't need this as this is a PR related to documentation only.
https://api.github.com/repos/pandas-dev/pandas/pulls/34461
2020-05-29T16:04:08Z
2020-06-02T22:57:44Z
2020-06-02T22:57:43Z
2020-06-02T22:57:49Z
CLN: Clean csv files in test data GH34427
diff --git a/doc/source/getting_started/comparison/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst index f12d97d1d0fde..85c6ea2c31969 100644 --- a/doc/source/getting_started/comparison/comparison_with_sas.rst +++ b/doc/source/getting_started/comparison/comparison_with_sas.rst @@ -115,7 +115,7 @@ Reading external data Like SAS, pandas provides utilities for reading in data from many formats. The ``tips`` dataset, found within the pandas -tests (`csv <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv>`_) +tests (`csv <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/io/data/csv/tips.csv>`_) will be used in many of the following examples. SAS provides ``PROC IMPORT`` to read csv data into a data set. @@ -131,7 +131,7 @@ The pandas method is :func:`read_csv`, which works similarly. .. ipython:: python url = ('https://raw.github.com/pandas-dev/' - 'pandas/master/pandas/tests/data/tips.csv') + 'pandas/master/pandas/tests/io/data/csv/tips.csv') tips = pd.read_csv(url) tips.head() diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst index c46ec9b3f7090..aa7218c3e4fad 100644 --- a/doc/source/getting_started/comparison/comparison_with_sql.rst +++ b/doc/source/getting_started/comparison/comparison_with_sql.rst @@ -25,7 +25,7 @@ structure. .. 
ipython:: python url = ('https://raw.github.com/pandas-dev' - '/pandas/master/pandas/tests/data/tips.csv') + '/pandas/master/pandas/tests/io/data/csv/tips.csv') tips = pd.read_csv(url) tips.head() diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst index decf12db77af2..06f9e45466243 100644 --- a/doc/source/getting_started/comparison/comparison_with_stata.rst +++ b/doc/source/getting_started/comparison/comparison_with_stata.rst @@ -112,7 +112,7 @@ Reading external data Like Stata, pandas provides utilities for reading in data from many formats. The ``tips`` data set, found within the pandas -tests (`csv <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv>`_) +tests (`csv <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/io/data/csv/tips.csv>`_) will be used in many of the following examples. Stata provides ``import delimited`` to read csv data into a data set in memory. @@ -128,7 +128,7 @@ the data set if presented with a url. .. ipython:: python url = ('https://raw.github.com/pandas-dev' - '/pandas/master/pandas/tests/data/tips.csv') + '/pandas/master/pandas/tests/io/data/csv/tips.csv') tips = pd.read_csv(url) tips.head() diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index 814627043cfc8..5dca9d4c900dc 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -865,7 +865,7 @@ for more information. By coloring these curves differently for each class it is possible to visualize data clustering. Curves belonging to samples of the same class will usually be closer together and form larger structures. -**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv>`__. 
+**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/io/data/csv/iris.csv>`__. .. ipython:: python @@ -1025,7 +1025,7 @@ be colored differently. See the R package `Radviz <https://cran.r-project.org/package=Radviz/>`__ for more information. -**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv>`__. +**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/io/data/csv/iris.csv>`__. .. ipython:: python diff --git a/pandas/conftest.py b/pandas/conftest.py index 1e7f1b769c856..e4cb3270b9acf 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -813,7 +813,7 @@ def iris(datapath): """ The iris dataset as a DataFrame. """ - return pd.read_csv(datapath("data", "iris.csv")) + return pd.read_csv(datapath("io", "data", "csv", "iris.csv")) # ---------------------------------------------------------------- diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index 594b95d1937ea..3056977ec78ad 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -263,7 +263,7 @@ def andrews_curves( >>> df = pd.read_csv( ... 'https://raw.github.com/pandas-dev/' - ... 'pandas/master/pandas/tests/data/iris.csv' + ... 'pandas/master/pandas/tests/io/data/csv/iris.csv' ... ) >>> pd.plotting.andrews_curves(df, 'Name') """ @@ -387,7 +387,7 @@ def parallel_coordinates( >>> df = pd.read_csv( ... 'https://raw.github.com/pandas-dev/' - ... 'pandas/master/pandas/tests/data/iris.csv' + ... 'pandas/master/pandas/tests/io/data/csv/iris.csv' ... ) >>> pd.plotting.parallel_coordinates( ... 
df, 'Name', color=('#556270', '#4ECDC4', '#C7F464') diff --git a/pandas/tests/data/iris.csv b/pandas/tests/data/iris.csv deleted file mode 100644 index c19b9c3688515..0000000000000 --- a/pandas/tests/data/iris.csv +++ /dev/null @@ -1,151 +0,0 @@ -SepalLength,SepalWidth,PetalLength,PetalWidth,Name -5.1,3.5,1.4,0.2,Iris-setosa -4.9,3.0,1.4,0.2,Iris-setosa -4.7,3.2,1.3,0.2,Iris-setosa -4.6,3.1,1.5,0.2,Iris-setosa -5.0,3.6,1.4,0.2,Iris-setosa -5.4,3.9,1.7,0.4,Iris-setosa -4.6,3.4,1.4,0.3,Iris-setosa -5.0,3.4,1.5,0.2,Iris-setosa -4.4,2.9,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.4,3.7,1.5,0.2,Iris-setosa -4.8,3.4,1.6,0.2,Iris-setosa -4.8,3.0,1.4,0.1,Iris-setosa -4.3,3.0,1.1,0.1,Iris-setosa -5.8,4.0,1.2,0.2,Iris-setosa -5.7,4.4,1.5,0.4,Iris-setosa -5.4,3.9,1.3,0.4,Iris-setosa -5.1,3.5,1.4,0.3,Iris-setosa -5.7,3.8,1.7,0.3,Iris-setosa -5.1,3.8,1.5,0.3,Iris-setosa -5.4,3.4,1.7,0.2,Iris-setosa -5.1,3.7,1.5,0.4,Iris-setosa -4.6,3.6,1.0,0.2,Iris-setosa -5.1,3.3,1.7,0.5,Iris-setosa -4.8,3.4,1.9,0.2,Iris-setosa -5.0,3.0,1.6,0.2,Iris-setosa -5.0,3.4,1.6,0.4,Iris-setosa -5.2,3.5,1.5,0.2,Iris-setosa -5.2,3.4,1.4,0.2,Iris-setosa -4.7,3.2,1.6,0.2,Iris-setosa -4.8,3.1,1.6,0.2,Iris-setosa -5.4,3.4,1.5,0.4,Iris-setosa -5.2,4.1,1.5,0.1,Iris-setosa -5.5,4.2,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.0,3.2,1.2,0.2,Iris-setosa -5.5,3.5,1.3,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -4.4,3.0,1.3,0.2,Iris-setosa -5.1,3.4,1.5,0.2,Iris-setosa -5.0,3.5,1.3,0.3,Iris-setosa -4.5,2.3,1.3,0.3,Iris-setosa -4.4,3.2,1.3,0.2,Iris-setosa -5.0,3.5,1.6,0.6,Iris-setosa -5.1,3.8,1.9,0.4,Iris-setosa -4.8,3.0,1.4,0.3,Iris-setosa -5.1,3.8,1.6,0.2,Iris-setosa -4.6,3.2,1.4,0.2,Iris-setosa -5.3,3.7,1.5,0.2,Iris-setosa -5.0,3.3,1.4,0.2,Iris-setosa -7.0,3.2,4.7,1.4,Iris-versicolor -6.4,3.2,4.5,1.5,Iris-versicolor -6.9,3.1,4.9,1.5,Iris-versicolor -5.5,2.3,4.0,1.3,Iris-versicolor -6.5,2.8,4.6,1.5,Iris-versicolor -5.7,2.8,4.5,1.3,Iris-versicolor -6.3,3.3,4.7,1.6,Iris-versicolor 
-4.9,2.4,3.3,1.0,Iris-versicolor -6.6,2.9,4.6,1.3,Iris-versicolor -5.2,2.7,3.9,1.4,Iris-versicolor -5.0,2.0,3.5,1.0,Iris-versicolor -5.9,3.0,4.2,1.5,Iris-versicolor -6.0,2.2,4.0,1.0,Iris-versicolor -6.1,2.9,4.7,1.4,Iris-versicolor -5.6,2.9,3.6,1.3,Iris-versicolor -6.7,3.1,4.4,1.4,Iris-versicolor -5.6,3.0,4.5,1.5,Iris-versicolor -5.8,2.7,4.1,1.0,Iris-versicolor -6.2,2.2,4.5,1.5,Iris-versicolor -5.6,2.5,3.9,1.1,Iris-versicolor -5.9,3.2,4.8,1.8,Iris-versicolor -6.1,2.8,4.0,1.3,Iris-versicolor -6.3,2.5,4.9,1.5,Iris-versicolor -6.1,2.8,4.7,1.2,Iris-versicolor -6.4,2.9,4.3,1.3,Iris-versicolor -6.6,3.0,4.4,1.4,Iris-versicolor -6.8,2.8,4.8,1.4,Iris-versicolor -6.7,3.0,5.0,1.7,Iris-versicolor -6.0,2.9,4.5,1.5,Iris-versicolor -5.7,2.6,3.5,1.0,Iris-versicolor -5.5,2.4,3.8,1.1,Iris-versicolor -5.5,2.4,3.7,1.0,Iris-versicolor -5.8,2.7,3.9,1.2,Iris-versicolor -6.0,2.7,5.1,1.6,Iris-versicolor -5.4,3.0,4.5,1.5,Iris-versicolor -6.0,3.4,4.5,1.6,Iris-versicolor -6.7,3.1,4.7,1.5,Iris-versicolor -6.3,2.3,4.4,1.3,Iris-versicolor -5.6,3.0,4.1,1.3,Iris-versicolor -5.5,2.5,4.0,1.3,Iris-versicolor -5.5,2.6,4.4,1.2,Iris-versicolor -6.1,3.0,4.6,1.4,Iris-versicolor -5.8,2.6,4.0,1.2,Iris-versicolor -5.0,2.3,3.3,1.0,Iris-versicolor -5.6,2.7,4.2,1.3,Iris-versicolor -5.7,3.0,4.2,1.2,Iris-versicolor -5.7,2.9,4.2,1.3,Iris-versicolor -6.2,2.9,4.3,1.3,Iris-versicolor -5.1,2.5,3.0,1.1,Iris-versicolor -5.7,2.8,4.1,1.3,Iris-versicolor -6.3,3.3,6.0,2.5,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -7.1,3.0,5.9,2.1,Iris-virginica -6.3,2.9,5.6,1.8,Iris-virginica -6.5,3.0,5.8,2.2,Iris-virginica -7.6,3.0,6.6,2.1,Iris-virginica -4.9,2.5,4.5,1.7,Iris-virginica -7.3,2.9,6.3,1.8,Iris-virginica -6.7,2.5,5.8,1.8,Iris-virginica -7.2,3.6,6.1,2.5,Iris-virginica -6.5,3.2,5.1,2.0,Iris-virginica -6.4,2.7,5.3,1.9,Iris-virginica -6.8,3.0,5.5,2.1,Iris-virginica -5.7,2.5,5.0,2.0,Iris-virginica -5.8,2.8,5.1,2.4,Iris-virginica -6.4,3.2,5.3,2.3,Iris-virginica -6.5,3.0,5.5,1.8,Iris-virginica -7.7,3.8,6.7,2.2,Iris-virginica 
-7.7,2.6,6.9,2.3,Iris-virginica -6.0,2.2,5.0,1.5,Iris-virginica -6.9,3.2,5.7,2.3,Iris-virginica -5.6,2.8,4.9,2.0,Iris-virginica -7.7,2.8,6.7,2.0,Iris-virginica -6.3,2.7,4.9,1.8,Iris-virginica -6.7,3.3,5.7,2.1,Iris-virginica -7.2,3.2,6.0,1.8,Iris-virginica -6.2,2.8,4.8,1.8,Iris-virginica -6.1,3.0,4.9,1.8,Iris-virginica -6.4,2.8,5.6,2.1,Iris-virginica -7.2,3.0,5.8,1.6,Iris-virginica -7.4,2.8,6.1,1.9,Iris-virginica -7.9,3.8,6.4,2.0,Iris-virginica -6.4,2.8,5.6,2.2,Iris-virginica -6.3,2.8,5.1,1.5,Iris-virginica -6.1,2.6,5.6,1.4,Iris-virginica -7.7,3.0,6.1,2.3,Iris-virginica -6.3,3.4,5.6,2.4,Iris-virginica -6.4,3.1,5.5,1.8,Iris-virginica -6.0,3.0,4.8,1.8,Iris-virginica -6.9,3.1,5.4,2.1,Iris-virginica -6.7,3.1,5.6,2.4,Iris-virginica -6.9,3.1,5.1,2.3,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -6.8,3.2,5.9,2.3,Iris-virginica -6.7,3.3,5.7,2.5,Iris-virginica -6.7,3.0,5.2,2.3,Iris-virginica -6.3,2.5,5.0,1.9,Iris-virginica -6.5,3.0,5.2,2.0,Iris-virginica -6.2,3.4,5.4,2.3,Iris-virginica -5.9,3.0,5.1,1.8,Iris-virginica \ No newline at end of file diff --git a/pandas/tests/data/tips.csv b/pandas/tests/data/tips.csv deleted file mode 100644 index 856a65a69e647..0000000000000 --- a/pandas/tests/data/tips.csv +++ /dev/null @@ -1,245 +0,0 @@ -total_bill,tip,sex,smoker,day,time,size -16.99,1.01,Female,No,Sun,Dinner,2 -10.34,1.66,Male,No,Sun,Dinner,3 -21.01,3.5,Male,No,Sun,Dinner,3 -23.68,3.31,Male,No,Sun,Dinner,2 -24.59,3.61,Female,No,Sun,Dinner,4 -25.29,4.71,Male,No,Sun,Dinner,4 -8.77,2.0,Male,No,Sun,Dinner,2 -26.88,3.12,Male,No,Sun,Dinner,4 -15.04,1.96,Male,No,Sun,Dinner,2 -14.78,3.23,Male,No,Sun,Dinner,2 -10.27,1.71,Male,No,Sun,Dinner,2 -35.26,5.0,Female,No,Sun,Dinner,4 -15.42,1.57,Male,No,Sun,Dinner,2 -18.43,3.0,Male,No,Sun,Dinner,4 -14.83,3.02,Female,No,Sun,Dinner,2 -21.58,3.92,Male,No,Sun,Dinner,2 -10.33,1.67,Female,No,Sun,Dinner,3 -16.29,3.71,Male,No,Sun,Dinner,3 -16.97,3.5,Female,No,Sun,Dinner,3 -20.65,3.35,Male,No,Sat,Dinner,3 -17.92,4.08,Male,No,Sat,Dinner,2 
-20.29,2.75,Female,No,Sat,Dinner,2 -15.77,2.23,Female,No,Sat,Dinner,2 -39.42,7.58,Male,No,Sat,Dinner,4 -19.82,3.18,Male,No,Sat,Dinner,2 -17.81,2.34,Male,No,Sat,Dinner,4 -13.37,2.0,Male,No,Sat,Dinner,2 -12.69,2.0,Male,No,Sat,Dinner,2 -21.7,4.3,Male,No,Sat,Dinner,2 -19.65,3.0,Female,No,Sat,Dinner,2 -9.55,1.45,Male,No,Sat,Dinner,2 -18.35,2.5,Male,No,Sat,Dinner,4 -15.06,3.0,Female,No,Sat,Dinner,2 -20.69,2.45,Female,No,Sat,Dinner,4 -17.78,3.27,Male,No,Sat,Dinner,2 -24.06,3.6,Male,No,Sat,Dinner,3 -16.31,2.0,Male,No,Sat,Dinner,3 -16.93,3.07,Female,No,Sat,Dinner,3 -18.69,2.31,Male,No,Sat,Dinner,3 -31.27,5.0,Male,No,Sat,Dinner,3 -16.04,2.24,Male,No,Sat,Dinner,3 -17.46,2.54,Male,No,Sun,Dinner,2 -13.94,3.06,Male,No,Sun,Dinner,2 -9.68,1.32,Male,No,Sun,Dinner,2 -30.4,5.6,Male,No,Sun,Dinner,4 -18.29,3.0,Male,No,Sun,Dinner,2 -22.23,5.0,Male,No,Sun,Dinner,2 -32.4,6.0,Male,No,Sun,Dinner,4 -28.55,2.05,Male,No,Sun,Dinner,3 -18.04,3.0,Male,No,Sun,Dinner,2 -12.54,2.5,Male,No,Sun,Dinner,2 -10.29,2.6,Female,No,Sun,Dinner,2 -34.81,5.2,Female,No,Sun,Dinner,4 -9.94,1.56,Male,No,Sun,Dinner,2 -25.56,4.34,Male,No,Sun,Dinner,4 -19.49,3.51,Male,No,Sun,Dinner,2 -38.01,3.0,Male,Yes,Sat,Dinner,4 -26.41,1.5,Female,No,Sat,Dinner,2 -11.24,1.76,Male,Yes,Sat,Dinner,2 -48.27,6.73,Male,No,Sat,Dinner,4 -20.29,3.21,Male,Yes,Sat,Dinner,2 -13.81,2.0,Male,Yes,Sat,Dinner,2 -11.02,1.98,Male,Yes,Sat,Dinner,2 -18.29,3.76,Male,Yes,Sat,Dinner,4 -17.59,2.64,Male,No,Sat,Dinner,3 -20.08,3.15,Male,No,Sat,Dinner,3 -16.45,2.47,Female,No,Sat,Dinner,2 -3.07,1.0,Female,Yes,Sat,Dinner,1 -20.23,2.01,Male,No,Sat,Dinner,2 -15.01,2.09,Male,Yes,Sat,Dinner,2 -12.02,1.97,Male,No,Sat,Dinner,2 -17.07,3.0,Female,No,Sat,Dinner,3 -26.86,3.14,Female,Yes,Sat,Dinner,2 -25.28,5.0,Female,Yes,Sat,Dinner,2 -14.73,2.2,Female,No,Sat,Dinner,2 -10.51,1.25,Male,No,Sat,Dinner,2 -17.92,3.08,Male,Yes,Sat,Dinner,2 -27.2,4.0,Male,No,Thur,Lunch,4 -22.76,3.0,Male,No,Thur,Lunch,2 -17.29,2.71,Male,No,Thur,Lunch,2 -19.44,3.0,Male,Yes,Thur,Lunch,2 
-16.66,3.4,Male,No,Thur,Lunch,2 -10.07,1.83,Female,No,Thur,Lunch,1 -32.68,5.0,Male,Yes,Thur,Lunch,2 -15.98,2.03,Male,No,Thur,Lunch,2 -34.83,5.17,Female,No,Thur,Lunch,4 -13.03,2.0,Male,No,Thur,Lunch,2 -18.28,4.0,Male,No,Thur,Lunch,2 -24.71,5.85,Male,No,Thur,Lunch,2 -21.16,3.0,Male,No,Thur,Lunch,2 -28.97,3.0,Male,Yes,Fri,Dinner,2 -22.49,3.5,Male,No,Fri,Dinner,2 -5.75,1.0,Female,Yes,Fri,Dinner,2 -16.32,4.3,Female,Yes,Fri,Dinner,2 -22.75,3.25,Female,No,Fri,Dinner,2 -40.17,4.73,Male,Yes,Fri,Dinner,4 -27.28,4.0,Male,Yes,Fri,Dinner,2 -12.03,1.5,Male,Yes,Fri,Dinner,2 -21.01,3.0,Male,Yes,Fri,Dinner,2 -12.46,1.5,Male,No,Fri,Dinner,2 -11.35,2.5,Female,Yes,Fri,Dinner,2 -15.38,3.0,Female,Yes,Fri,Dinner,2 -44.3,2.5,Female,Yes,Sat,Dinner,3 -22.42,3.48,Female,Yes,Sat,Dinner,2 -20.92,4.08,Female,No,Sat,Dinner,2 -15.36,1.64,Male,Yes,Sat,Dinner,2 -20.49,4.06,Male,Yes,Sat,Dinner,2 -25.21,4.29,Male,Yes,Sat,Dinner,2 -18.24,3.76,Male,No,Sat,Dinner,2 -14.31,4.0,Female,Yes,Sat,Dinner,2 -14.0,3.0,Male,No,Sat,Dinner,2 -7.25,1.0,Female,No,Sat,Dinner,1 -38.07,4.0,Male,No,Sun,Dinner,3 -23.95,2.55,Male,No,Sun,Dinner,2 -25.71,4.0,Female,No,Sun,Dinner,3 -17.31,3.5,Female,No,Sun,Dinner,2 -29.93,5.07,Male,No,Sun,Dinner,4 -10.65,1.5,Female,No,Thur,Lunch,2 -12.43,1.8,Female,No,Thur,Lunch,2 -24.08,2.92,Female,No,Thur,Lunch,4 -11.69,2.31,Male,No,Thur,Lunch,2 -13.42,1.68,Female,No,Thur,Lunch,2 -14.26,2.5,Male,No,Thur,Lunch,2 -15.95,2.0,Male,No,Thur,Lunch,2 -12.48,2.52,Female,No,Thur,Lunch,2 -29.8,4.2,Female,No,Thur,Lunch,6 -8.52,1.48,Male,No,Thur,Lunch,2 -14.52,2.0,Female,No,Thur,Lunch,2 -11.38,2.0,Female,No,Thur,Lunch,2 -22.82,2.18,Male,No,Thur,Lunch,3 -19.08,1.5,Male,No,Thur,Lunch,2 -20.27,2.83,Female,No,Thur,Lunch,2 -11.17,1.5,Female,No,Thur,Lunch,2 -12.26,2.0,Female,No,Thur,Lunch,2 -18.26,3.25,Female,No,Thur,Lunch,2 -8.51,1.25,Female,No,Thur,Lunch,2 -10.33,2.0,Female,No,Thur,Lunch,2 -14.15,2.0,Female,No,Thur,Lunch,2 -16.0,2.0,Male,Yes,Thur,Lunch,2 -13.16,2.75,Female,No,Thur,Lunch,2 
-17.47,3.5,Female,No,Thur,Lunch,2 -34.3,6.7,Male,No,Thur,Lunch,6 -41.19,5.0,Male,No,Thur,Lunch,5 -27.05,5.0,Female,No,Thur,Lunch,6 -16.43,2.3,Female,No,Thur,Lunch,2 -8.35,1.5,Female,No,Thur,Lunch,2 -18.64,1.36,Female,No,Thur,Lunch,3 -11.87,1.63,Female,No,Thur,Lunch,2 -9.78,1.73,Male,No,Thur,Lunch,2 -7.51,2.0,Male,No,Thur,Lunch,2 -14.07,2.5,Male,No,Sun,Dinner,2 -13.13,2.0,Male,No,Sun,Dinner,2 -17.26,2.74,Male,No,Sun,Dinner,3 -24.55,2.0,Male,No,Sun,Dinner,4 -19.77,2.0,Male,No,Sun,Dinner,4 -29.85,5.14,Female,No,Sun,Dinner,5 -48.17,5.0,Male,No,Sun,Dinner,6 -25.0,3.75,Female,No,Sun,Dinner,4 -13.39,2.61,Female,No,Sun,Dinner,2 -16.49,2.0,Male,No,Sun,Dinner,4 -21.5,3.5,Male,No,Sun,Dinner,4 -12.66,2.5,Male,No,Sun,Dinner,2 -16.21,2.0,Female,No,Sun,Dinner,3 -13.81,2.0,Male,No,Sun,Dinner,2 -17.51,3.0,Female,Yes,Sun,Dinner,2 -24.52,3.48,Male,No,Sun,Dinner,3 -20.76,2.24,Male,No,Sun,Dinner,2 -31.71,4.5,Male,No,Sun,Dinner,4 -10.59,1.61,Female,Yes,Sat,Dinner,2 -10.63,2.0,Female,Yes,Sat,Dinner,2 -50.81,10.0,Male,Yes,Sat,Dinner,3 -15.81,3.16,Male,Yes,Sat,Dinner,2 -7.25,5.15,Male,Yes,Sun,Dinner,2 -31.85,3.18,Male,Yes,Sun,Dinner,2 -16.82,4.0,Male,Yes,Sun,Dinner,2 -32.9,3.11,Male,Yes,Sun,Dinner,2 -17.89,2.0,Male,Yes,Sun,Dinner,2 -14.48,2.0,Male,Yes,Sun,Dinner,2 -9.6,4.0,Female,Yes,Sun,Dinner,2 -34.63,3.55,Male,Yes,Sun,Dinner,2 -34.65,3.68,Male,Yes,Sun,Dinner,4 -23.33,5.65,Male,Yes,Sun,Dinner,2 -45.35,3.5,Male,Yes,Sun,Dinner,3 -23.17,6.5,Male,Yes,Sun,Dinner,4 -40.55,3.0,Male,Yes,Sun,Dinner,2 -20.69,5.0,Male,No,Sun,Dinner,5 -20.9,3.5,Female,Yes,Sun,Dinner,3 -30.46,2.0,Male,Yes,Sun,Dinner,5 -18.15,3.5,Female,Yes,Sun,Dinner,3 -23.1,4.0,Male,Yes,Sun,Dinner,3 -15.69,1.5,Male,Yes,Sun,Dinner,2 -19.81,4.19,Female,Yes,Thur,Lunch,2 -28.44,2.56,Male,Yes,Thur,Lunch,2 -15.48,2.02,Male,Yes,Thur,Lunch,2 -16.58,4.0,Male,Yes,Thur,Lunch,2 -7.56,1.44,Male,No,Thur,Lunch,2 -10.34,2.0,Male,Yes,Thur,Lunch,2 -43.11,5.0,Female,Yes,Thur,Lunch,4 -13.0,2.0,Female,Yes,Thur,Lunch,2 -13.51,2.0,Male,Yes,Thur,Lunch,2 
-18.71,4.0,Male,Yes,Thur,Lunch,3 -12.74,2.01,Female,Yes,Thur,Lunch,2 -13.0,2.0,Female,Yes,Thur,Lunch,2 -16.4,2.5,Female,Yes,Thur,Lunch,2 -20.53,4.0,Male,Yes,Thur,Lunch,4 -16.47,3.23,Female,Yes,Thur,Lunch,3 -26.59,3.41,Male,Yes,Sat,Dinner,3 -38.73,3.0,Male,Yes,Sat,Dinner,4 -24.27,2.03,Male,Yes,Sat,Dinner,2 -12.76,2.23,Female,Yes,Sat,Dinner,2 -30.06,2.0,Male,Yes,Sat,Dinner,3 -25.89,5.16,Male,Yes,Sat,Dinner,4 -48.33,9.0,Male,No,Sat,Dinner,4 -13.27,2.5,Female,Yes,Sat,Dinner,2 -28.17,6.5,Female,Yes,Sat,Dinner,3 -12.9,1.1,Female,Yes,Sat,Dinner,2 -28.15,3.0,Male,Yes,Sat,Dinner,5 -11.59,1.5,Male,Yes,Sat,Dinner,2 -7.74,1.44,Male,Yes,Sat,Dinner,2 -30.14,3.09,Female,Yes,Sat,Dinner,4 -12.16,2.2,Male,Yes,Fri,Lunch,2 -13.42,3.48,Female,Yes,Fri,Lunch,2 -8.58,1.92,Male,Yes,Fri,Lunch,1 -15.98,3.0,Female,No,Fri,Lunch,3 -13.42,1.58,Male,Yes,Fri,Lunch,2 -16.27,2.5,Female,Yes,Fri,Lunch,2 -10.09,2.0,Female,Yes,Fri,Lunch,2 -20.45,3.0,Male,No,Sat,Dinner,4 -13.28,2.72,Male,No,Sat,Dinner,2 -22.12,2.88,Female,Yes,Sat,Dinner,2 -24.01,2.0,Male,Yes,Sat,Dinner,4 -15.69,3.0,Male,Yes,Sat,Dinner,3 -11.61,3.39,Male,No,Sat,Dinner,2 -10.77,1.47,Male,No,Sat,Dinner,2 -15.53,3.0,Male,Yes,Sat,Dinner,2 -10.07,1.25,Male,No,Sat,Dinner,2 -12.6,1.0,Male,Yes,Sat,Dinner,2 -32.83,1.17,Male,Yes,Sat,Dinner,2 -35.83,4.67,Female,No,Sat,Dinner,3 -29.03,5.92,Male,No,Sat,Dinner,3 -27.18,2.0,Female,Yes,Sat,Dinner,2 -22.67,2.0,Male,Yes,Sat,Dinner,2 -17.82,1.75,Male,No,Sat,Dinner,2 -18.78,3.0,Female,No,Thur,Dinner,2 diff --git a/pandas/tests/io/parser/data/iris.csv b/pandas/tests/io/parser/data/iris.csv deleted file mode 100644 index c19b9c3688515..0000000000000 --- a/pandas/tests/io/parser/data/iris.csv +++ /dev/null @@ -1,151 +0,0 @@ -SepalLength,SepalWidth,PetalLength,PetalWidth,Name -5.1,3.5,1.4,0.2,Iris-setosa -4.9,3.0,1.4,0.2,Iris-setosa -4.7,3.2,1.3,0.2,Iris-setosa -4.6,3.1,1.5,0.2,Iris-setosa -5.0,3.6,1.4,0.2,Iris-setosa -5.4,3.9,1.7,0.4,Iris-setosa -4.6,3.4,1.4,0.3,Iris-setosa -5.0,3.4,1.5,0.2,Iris-setosa 
-4.4,2.9,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.4,3.7,1.5,0.2,Iris-setosa -4.8,3.4,1.6,0.2,Iris-setosa -4.8,3.0,1.4,0.1,Iris-setosa -4.3,3.0,1.1,0.1,Iris-setosa -5.8,4.0,1.2,0.2,Iris-setosa -5.7,4.4,1.5,0.4,Iris-setosa -5.4,3.9,1.3,0.4,Iris-setosa -5.1,3.5,1.4,0.3,Iris-setosa -5.7,3.8,1.7,0.3,Iris-setosa -5.1,3.8,1.5,0.3,Iris-setosa -5.4,3.4,1.7,0.2,Iris-setosa -5.1,3.7,1.5,0.4,Iris-setosa -4.6,3.6,1.0,0.2,Iris-setosa -5.1,3.3,1.7,0.5,Iris-setosa -4.8,3.4,1.9,0.2,Iris-setosa -5.0,3.0,1.6,0.2,Iris-setosa -5.0,3.4,1.6,0.4,Iris-setosa -5.2,3.5,1.5,0.2,Iris-setosa -5.2,3.4,1.4,0.2,Iris-setosa -4.7,3.2,1.6,0.2,Iris-setosa -4.8,3.1,1.6,0.2,Iris-setosa -5.4,3.4,1.5,0.4,Iris-setosa -5.2,4.1,1.5,0.1,Iris-setosa -5.5,4.2,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.0,3.2,1.2,0.2,Iris-setosa -5.5,3.5,1.3,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -4.4,3.0,1.3,0.2,Iris-setosa -5.1,3.4,1.5,0.2,Iris-setosa -5.0,3.5,1.3,0.3,Iris-setosa -4.5,2.3,1.3,0.3,Iris-setosa -4.4,3.2,1.3,0.2,Iris-setosa -5.0,3.5,1.6,0.6,Iris-setosa -5.1,3.8,1.9,0.4,Iris-setosa -4.8,3.0,1.4,0.3,Iris-setosa -5.1,3.8,1.6,0.2,Iris-setosa -4.6,3.2,1.4,0.2,Iris-setosa -5.3,3.7,1.5,0.2,Iris-setosa -5.0,3.3,1.4,0.2,Iris-setosa -7.0,3.2,4.7,1.4,Iris-versicolor -6.4,3.2,4.5,1.5,Iris-versicolor -6.9,3.1,4.9,1.5,Iris-versicolor -5.5,2.3,4.0,1.3,Iris-versicolor -6.5,2.8,4.6,1.5,Iris-versicolor -5.7,2.8,4.5,1.3,Iris-versicolor -6.3,3.3,4.7,1.6,Iris-versicolor -4.9,2.4,3.3,1.0,Iris-versicolor -6.6,2.9,4.6,1.3,Iris-versicolor -5.2,2.7,3.9,1.4,Iris-versicolor -5.0,2.0,3.5,1.0,Iris-versicolor -5.9,3.0,4.2,1.5,Iris-versicolor -6.0,2.2,4.0,1.0,Iris-versicolor -6.1,2.9,4.7,1.4,Iris-versicolor -5.6,2.9,3.6,1.3,Iris-versicolor -6.7,3.1,4.4,1.4,Iris-versicolor -5.6,3.0,4.5,1.5,Iris-versicolor -5.8,2.7,4.1,1.0,Iris-versicolor -6.2,2.2,4.5,1.5,Iris-versicolor -5.6,2.5,3.9,1.1,Iris-versicolor -5.9,3.2,4.8,1.8,Iris-versicolor -6.1,2.8,4.0,1.3,Iris-versicolor -6.3,2.5,4.9,1.5,Iris-versicolor 
-6.1,2.8,4.7,1.2,Iris-versicolor -6.4,2.9,4.3,1.3,Iris-versicolor -6.6,3.0,4.4,1.4,Iris-versicolor -6.8,2.8,4.8,1.4,Iris-versicolor -6.7,3.0,5.0,1.7,Iris-versicolor -6.0,2.9,4.5,1.5,Iris-versicolor -5.7,2.6,3.5,1.0,Iris-versicolor -5.5,2.4,3.8,1.1,Iris-versicolor -5.5,2.4,3.7,1.0,Iris-versicolor -5.8,2.7,3.9,1.2,Iris-versicolor -6.0,2.7,5.1,1.6,Iris-versicolor -5.4,3.0,4.5,1.5,Iris-versicolor -6.0,3.4,4.5,1.6,Iris-versicolor -6.7,3.1,4.7,1.5,Iris-versicolor -6.3,2.3,4.4,1.3,Iris-versicolor -5.6,3.0,4.1,1.3,Iris-versicolor -5.5,2.5,4.0,1.3,Iris-versicolor -5.5,2.6,4.4,1.2,Iris-versicolor -6.1,3.0,4.6,1.4,Iris-versicolor -5.8,2.6,4.0,1.2,Iris-versicolor -5.0,2.3,3.3,1.0,Iris-versicolor -5.6,2.7,4.2,1.3,Iris-versicolor -5.7,3.0,4.2,1.2,Iris-versicolor -5.7,2.9,4.2,1.3,Iris-versicolor -6.2,2.9,4.3,1.3,Iris-versicolor -5.1,2.5,3.0,1.1,Iris-versicolor -5.7,2.8,4.1,1.3,Iris-versicolor -6.3,3.3,6.0,2.5,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -7.1,3.0,5.9,2.1,Iris-virginica -6.3,2.9,5.6,1.8,Iris-virginica -6.5,3.0,5.8,2.2,Iris-virginica -7.6,3.0,6.6,2.1,Iris-virginica -4.9,2.5,4.5,1.7,Iris-virginica -7.3,2.9,6.3,1.8,Iris-virginica -6.7,2.5,5.8,1.8,Iris-virginica -7.2,3.6,6.1,2.5,Iris-virginica -6.5,3.2,5.1,2.0,Iris-virginica -6.4,2.7,5.3,1.9,Iris-virginica -6.8,3.0,5.5,2.1,Iris-virginica -5.7,2.5,5.0,2.0,Iris-virginica -5.8,2.8,5.1,2.4,Iris-virginica -6.4,3.2,5.3,2.3,Iris-virginica -6.5,3.0,5.5,1.8,Iris-virginica -7.7,3.8,6.7,2.2,Iris-virginica -7.7,2.6,6.9,2.3,Iris-virginica -6.0,2.2,5.0,1.5,Iris-virginica -6.9,3.2,5.7,2.3,Iris-virginica -5.6,2.8,4.9,2.0,Iris-virginica -7.7,2.8,6.7,2.0,Iris-virginica -6.3,2.7,4.9,1.8,Iris-virginica -6.7,3.3,5.7,2.1,Iris-virginica -7.2,3.2,6.0,1.8,Iris-virginica -6.2,2.8,4.8,1.8,Iris-virginica -6.1,3.0,4.9,1.8,Iris-virginica -6.4,2.8,5.6,2.1,Iris-virginica -7.2,3.0,5.8,1.6,Iris-virginica -7.4,2.8,6.1,1.9,Iris-virginica -7.9,3.8,6.4,2.0,Iris-virginica -6.4,2.8,5.6,2.2,Iris-virginica -6.3,2.8,5.1,1.5,Iris-virginica 
-6.1,2.6,5.6,1.4,Iris-virginica -7.7,3.0,6.1,2.3,Iris-virginica -6.3,3.4,5.6,2.4,Iris-virginica -6.4,3.1,5.5,1.8,Iris-virginica -6.0,3.0,4.8,1.8,Iris-virginica -6.9,3.1,5.4,2.1,Iris-virginica -6.7,3.1,5.6,2.4,Iris-virginica -6.9,3.1,5.1,2.3,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -6.8,3.2,5.9,2.3,Iris-virginica -6.7,3.3,5.7,2.5,Iris-virginica -6.7,3.0,5.2,2.3,Iris-virginica -6.3,2.5,5.0,1.9,Iris-virginica -6.5,3.0,5.2,2.0,Iris-virginica -6.2,3.4,5.4,2.3,Iris-virginica -5.9,3.0,5.1,1.8,Iris-virginica \ No newline at end of file diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index b27b028694d20..6f1d4daeb39cb 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -207,8 +207,8 @@ def test_read_expands_user_home_dir( @pytest.mark.parametrize( "reader, module, path", [ - (pd.read_csv, "os", ("data", "iris.csv")), - (pd.read_table, "os", ("data", "iris.csv")), + (pd.read_csv, "os", ("io", "data", "csv", "iris.csv")), + (pd.read_table, "os", ("io", "data", "csv", "iris.csv")), ( pd.read_fwf, "os", diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index bd53785e89bfe..7d4716e1b7d0c 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -278,7 +278,7 @@ def _get_exec(self): else: return self.conn.cursor() - @pytest.fixture(params=[("data", "iris.csv")]) + @pytest.fixture(params=[("io", "data", "csv", "iris.csv")]) def load_iris_data(self, datapath, request): import io diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py index 8860e6fe272ce..d73a789b876f4 100644 --- a/pandas/tests/util/test_util.py +++ b/pandas/tests/util/test_util.py @@ -58,7 +58,7 @@ def test_datapath_missing(datapath): def test_datapath(datapath): - args = ("data", "iris.csv") + args = ("io", "data", "csv", "iris.csv") result = datapath(*args) expected = os.path.join(os.path.dirname(os.path.dirname(__file__)), *args)
- [x] closes #34427 - [x] tests passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry I deleted the `iris.csv` and `tips.csv` files which are unused duplicates (i.e. those in `pandas/tests/io/data/csv`).
https://api.github.com/repos/pandas-dev/pandas/pulls/34458
2020-05-29T12:30:26Z
2020-06-05T23:04:24Z
2020-06-05T23:04:24Z
2020-06-09T11:51:08Z
TST/REF: refactor the arithmetic tests for IntegerArray
diff --git a/pandas/tests/arrays/integer/test_arithmetic.py b/pandas/tests/arrays/integer/test_arithmetic.py index 18f1dac3c13b2..a6c47f3192175 100644 --- a/pandas/tests/arrays/integer/test_arithmetic.py +++ b/pandas/tests/arrays/integer/test_arithmetic.py @@ -1,302 +1,355 @@ +import operator + import numpy as np import pytest import pandas as pd import pandas._testing as tm -from pandas.api.types import is_float, is_float_dtype, is_scalar -from pandas.core.arrays import IntegerArray, integer_array -from pandas.tests.extension.base import BaseOpsUtil - - -class TestArithmeticOps(BaseOpsUtil): - def _check_divmod_op(self, s, op, other, exc=None): - super()._check_divmod_op(s, op, other, None) - - def _check_op(self, s, op_name, other, exc=None): - op = self.get_op_from_name(op_name) - result = op(s, other) - - # compute expected - mask = s.isna() - - # if s is a DataFrame, squeeze to a Series - # for comparison - if isinstance(s, pd.DataFrame): - result = result.squeeze() - s = s.squeeze() - mask = mask.squeeze() - - # other array is an Integer - if isinstance(other, IntegerArray): - omask = getattr(other, "mask", None) - mask = getattr(other, "data", other) - if omask is not None: - mask |= omask - - # 1 ** na is na, so need to unmask those - if op_name == "__pow__": - mask = np.where(~s.isna() & (s == 1), False, mask) - - elif op_name == "__rpow__": - other_is_one = other == 1 - if isinstance(other_is_one, pd.Series): - other_is_one = other_is_one.fillna(False) - mask = np.where(other_is_one, False, mask) - - # float result type or float op - if ( - is_float_dtype(other) - or is_float(other) - or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"] - ): - rs = s.astype("float") - expected = op(rs, other) - self._check_op_float(result, expected, mask, s, op_name, other) - - # integer result type +from pandas.core.arrays import ExtensionArray, integer_array +import pandas.core.ops as ops + + +# TODO need to use existing utility function or move this 
somewhere central +def get_op_from_name(op_name): + short_opname = op_name.strip("_") + try: + op = getattr(operator, short_opname) + except AttributeError: + # Assume it is the reverse operator + rop = getattr(operator, short_opname[1:]) + op = lambda x, y: rop(y, x) + + return op + + +# Basic test for the arithmetic array ops +# ----------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "opname, exp", + [("add", [1, 3, None, None, 9]), ("mul", [0, 2, None, None, 20])], + ids=["add", "mul"], +) +def test_add_mul(dtype, opname, exp): + a = pd.array([0, 1, None, 3, 4], dtype=dtype) + b = pd.array([1, 2, 3, None, 5], dtype=dtype) + + # array / array + expected = pd.array(exp, dtype=dtype) + + op = getattr(operator, opname) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + + op = getattr(ops, "r" + opname) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + + +def test_sub(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a - b + expected = pd.array([1, 1, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_div(dtype): + # for now division gives a float numpy array + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a / b + expected = np.array([np.inf, 2, np.nan, np.nan, 1.25], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)]) +def test_divide_by_zero(zero, negative): + # https://github.com/pandas-dev/pandas/issues/27398 + a = pd.array([0, 1, -1, None], dtype="Int64") + result = a / zero + expected = np.array([np.nan, np.inf, -np.inf, np.nan]) + if negative: + expected *= -1 + tm.assert_numpy_array_equal(result, expected) + + +def test_floordiv(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = 
pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a // b + # Series op sets 1//0 to np.inf, which IntegerArray does not do (yet) + expected = pd.array([0, 2, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_mod(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a % b + expected = pd.array([0, 0, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_pow_scalar(): + a = pd.array([-1, 0, 1, None, 2], dtype="Int64") + result = a ** 0 + expected = pd.array([1, 1, 1, 1, 1], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a ** 1 + expected = pd.array([-1, 0, 1, None, 2], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a ** pd.NA + expected = pd.array([None, None, 1, None, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a ** np.nan + expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + # reversed + a = a[1:] # Can't raise integers to negative powers. 
+ + result = 0 ** a + expected = pd.array([1, 0, None, 0], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = 1 ** a + expected = pd.array([1, 1, 1, 1], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = pd.NA ** a + expected = pd.array([1, None, None, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = np.nan ** a + expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + +def test_pow_array(): + a = integer_array([0, 0, 0, 1, 1, 1, None, None, None]) + b = integer_array([0, 1, None, 0, 1, None, 0, 1, None]) + result = a ** b + expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None]) + tm.assert_extension_array_equal(result, expected) + + +def test_rpow_one_to_na(): + # https://github.com/pandas-dev/pandas/issues/22022 + # https://github.com/pandas-dev/pandas/issues/29997 + arr = integer_array([np.nan, np.nan]) + result = np.array([1.0, 2.0]) ** arr + expected = np.array([1.0, np.nan]) + tm.assert_numpy_array_equal(result, expected) + + +# Test equivalence of scalars, numpy arrays with array ops +# ----------------------------------------------------------------------------- + + +def test_array_scalar_like_equivalence(data, all_arithmetic_operators): + op = get_op_from_name(all_arithmetic_operators) + + scalar = 2 + scalar_array = pd.array([2] * len(data), dtype=data.dtype) + + # TODO also add len-1 array (np.array([2], dtype=data.dtype.numpy_dtype)) + for scalar in [2, data.dtype.type(2)]: + result = op(data, scalar) + expected = op(data, scalar_array) + if isinstance(expected, ExtensionArray): + tm.assert_extension_array_equal(result, expected) else: - rs = pd.Series(s.values._data, name=s.name) - expected = op(rs, other) - self._check_op_integer(result, expected, mask, s, op_name, other) - - def _check_op_float(self, result, expected, mask, s, op_name, other): - # check comparisons that are resulting in 
float dtypes - - expected[mask] = np.nan - if "floordiv" in op_name: - # Series op sets 1//0 to np.inf, which IntegerArray does not do (yet) - mask2 = np.isinf(expected) & np.isnan(result) - expected[mask2] = np.nan - tm.assert_series_equal(result, expected) - - def _check_op_integer(self, result, expected, mask, s, op_name, other): - # check comparisons that are resulting in integer dtypes - - # to compare properly, we convert the expected - # to float, mask to nans and convert infs - # if we have uints then we process as uints - # then convert to float - # and we ultimately want to create a IntArray - # for comparisons - - fill_value = 0 - - # mod/rmod turn floating 0 into NaN while - # integer works as expected (no nan) - if op_name in ["__mod__", "__rmod__"]: - if is_scalar(other): - if other == 0: - expected[s.values == 0] = 0 - else: - expected = expected.fillna(0) - else: - expected[ - (s.values == 0).fillna(False) - & ((expected == 0).fillna(False) | expected.isna()) - ] = 0 - try: - expected[ - ((expected == np.inf) | (expected == -np.inf)).fillna(False) - ] = fill_value - original = expected - expected = expected.astype(s.dtype) - - except ValueError: - - expected = expected.astype(float) - expected[ - ((expected == np.inf) | (expected == -np.inf)).fillna(False) - ] = fill_value - original = expected - expected = expected.astype(s.dtype) - - expected[mask] = pd.NA - - # assert that the expected astype is ok - # (skip for unsigned as they have wrap around) - if not s.dtype.is_unsigned_integer: - original = pd.Series(original) - - # we need to fill with 0's to emulate what an astype('int') does - # (truncation) for certain ops - if op_name in ["__rtruediv__", "__rdiv__"]: - mask |= original.isna() - original = original.fillna(0).astype("int") - - original = original.astype("float") - original[mask] = np.nan - tm.assert_series_equal(original, expected.astype("float")) - - # assert our expected result - tm.assert_series_equal(result, expected) - - def 
test_arith_integer_array(self, data, all_arithmetic_operators): - # we operate with a rhs of an integer array - - op = all_arithmetic_operators + # TODO div still gives float ndarray -> remove this once we have Float EA + tm.assert_numpy_array_equal(result, expected) - s = pd.Series(data) - rhs = pd.Series([1] * len(data), dtype=data.dtype) - rhs.iloc[-1] = np.nan - self._check_op(s, op, rhs) +def test_array_NA(data, all_arithmetic_operators): + if "truediv" in all_arithmetic_operators: + pytest.skip("division with pd.NA raises") + op = get_op_from_name(all_arithmetic_operators) - def test_arith_series_with_scalar(self, data, all_arithmetic_operators): - # scalar - op = all_arithmetic_operators - s = pd.Series(data) - self._check_op(s, op, 1, exc=TypeError) + scalar = pd.NA + scalar_array = pd.array([pd.NA] * len(data), dtype=data.dtype) - def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): - # frame & scalar - op = all_arithmetic_operators - df = pd.DataFrame({"A": data}) - self._check_op(df, op, 1, exc=TypeError) + result = op(data, scalar) + expected = op(data, scalar_array) + tm.assert_extension_array_equal(result, expected) - def test_arith_series_with_array(self, data, all_arithmetic_operators): - # ndarray & other series - op = all_arithmetic_operators - s = pd.Series(data) - other = np.ones(len(s), dtype=s.dtype.type) - self._check_op(s, op, other, exc=TypeError) - def test_arith_coerce_scalar(self, data, all_arithmetic_operators): +def test_numpy_array_equivalence(data, all_arithmetic_operators): + op = get_op_from_name(all_arithmetic_operators) - op = all_arithmetic_operators - s = pd.Series(data) + numpy_array = np.array([2] * len(data), dtype=data.dtype.numpy_dtype) + pd_array = pd.array(numpy_array, dtype=data.dtype) + + result = op(data, numpy_array) + expected = op(data, pd_array) + if isinstance(expected, ExtensionArray): + tm.assert_extension_array_equal(result, expected) + else: + # TODO div still gives float ndarray -> remove 
this once we have Float EA + tm.assert_numpy_array_equal(result, expected) - other = 0.01 - self._check_op(s, op, other) - @pytest.mark.parametrize("other", [1.0, np.array(1.0)]) - def test_arithmetic_conversion(self, all_arithmetic_operators, other): - # if we have a float operand we should have a float result - # if that is equal to an integer - op = self.get_op_from_name(all_arithmetic_operators) +@pytest.mark.parametrize("other", [0, 0.5]) +def test_numpy_zero_dim_ndarray(other): + arr = integer_array([1, None, 2]) + result = arr + np.array(other) + expected = arr + other + tm.assert_equal(result, expected) - s = pd.Series([1, 2, 3], dtype="Int64") - result = op(s, other) - assert result.dtype is np.dtype("float") - def test_arith_len_mismatch(self, all_arithmetic_operators): - # operating with a list-like with non-matching length raises - op = self.get_op_from_name(all_arithmetic_operators) - other = np.array([1.0]) +# Test equivalence with Series and DataFrame ops +# ----------------------------------------------------------------------------- - s = pd.Series([1, 2, 3], dtype="Int64") - with pytest.raises(ValueError, match="Lengths must match"): - op(s, other) - @pytest.mark.parametrize("other", [0, 0.5]) - def test_arith_zero_dim_ndarray(self, other): - arr = integer_array([1, None, 2]) - result = arr + np.array(other) - expected = arr + other - tm.assert_equal(result, expected) +def test_frame(data, all_arithmetic_operators): + op = get_op_from_name(all_arithmetic_operators) - def test_error(self, data, all_arithmetic_operators): - # invalid ops + # DataFrame with scalar + df = pd.DataFrame({"A": data}) + scalar = 2 - op = all_arithmetic_operators - s = pd.Series(data) - ops = getattr(s, op) - opa = getattr(data, op) + result = op(df, scalar) + expected = pd.DataFrame({"A": op(data, scalar)}) + tm.assert_frame_equal(result, expected) + + +def test_series(data, all_arithmetic_operators): + op = get_op_from_name(all_arithmetic_operators) + + s = 
pd.Series(data) + + # Series with scalar + scalar = 2 + result = op(s, scalar) + expected = pd.Series(op(data, scalar)) + tm.assert_series_equal(result, expected) + + # Series with np.ndarray + other = np.ones(len(data), dtype=data.dtype.type) + result = op(s, other) + expected = pd.Series(op(data, other)) + tm.assert_series_equal(result, expected) - # invalid scalars + # Series with pd.array + other = pd.array(np.ones(len(data)), dtype=data.dtype) + result = op(s, other) + expected = pd.Series(op(data, other)) + tm.assert_series_equal(result, expected) + + # Series with Series + other = pd.Series(np.ones(len(data)), dtype=data.dtype) + result = op(s, other) + expected = pd.Series(op(data, other.array)) + tm.assert_series_equal(result, expected) + + +# Test generic charachteristics / errors +# ----------------------------------------------------------------------------- + + +def test_error_invalid_values(data, all_arithmetic_operators): + + op = all_arithmetic_operators + s = pd.Series(data) + ops = getattr(s, op) + + # invalid scalars + msg = ( + r"(:?can only perform ops with numeric values)" + r"|(:?IntegerArray cannot perform the operation mod)" + ) + with pytest.raises(TypeError, match=msg): + ops("foo") + with pytest.raises(TypeError, match=msg): + ops(pd.Timestamp("20180101")) + + # invalid array-likes + with pytest.raises(TypeError, match=msg): + ops(pd.Series("foo", index=s.index)) + + if op != "__rpow__": + # TODO(extension) + # rpow with a datetimelike coerces the integer array incorrectly msg = ( - r"(:?can only perform ops with numeric values)" - r"|(:?IntegerArray cannot perform the operation mod)" + "can only perform ops with numeric values|" + "cannot perform .* with this index type: DatetimeArray|" + "Addition/subtraction of integers and integer-arrays " + "with DatetimeArray is no longer supported. 
*" ) with pytest.raises(TypeError, match=msg): - ops("foo") - with pytest.raises(TypeError, match=msg): - ops(pd.Timestamp("20180101")) + ops(pd.Series(pd.date_range("20180101", periods=len(s)))) - # invalid array-likes - with pytest.raises(TypeError, match=msg): - ops(pd.Series("foo", index=s.index)) - - if op != "__rpow__": - # TODO(extension) - # rpow with a datetimelike coerces the integer array incorrectly - msg = ( - "can only perform ops with numeric values|" - "cannot perform .* with this index type: DatetimeArray|" - "Addition/subtraction of integers and integer-arrays " - "with DatetimeArray is no longer supported. *" - ) - with pytest.raises(TypeError, match=msg): - ops(pd.Series(pd.date_range("20180101", periods=len(s)))) - - # 2d - result = opa(pd.DataFrame({"A": s})) - assert result is NotImplemented - - msg = r"can only perform ops with 1-d structures" - with pytest.raises(NotImplementedError, match=msg): - opa(np.arange(len(s)).reshape(-1, len(s))) - - @pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)]) - def test_divide_by_zero(self, zero, negative): - # https://github.com/pandas-dev/pandas/issues/27398 - a = pd.array([0, 1, -1, None], dtype="Int64") - result = a / zero - expected = np.array([np.nan, np.inf, -np.inf, np.nan]) - if negative: - expected *= -1 - tm.assert_numpy_array_equal(result, expected) - def test_pow_scalar(self): - a = pd.array([-1, 0, 1, None, 2], dtype="Int64") - result = a ** 0 - expected = pd.array([1, 1, 1, 1, 1], dtype="Int64") - tm.assert_extension_array_equal(result, expected) +def test_error_invalid_object(data, all_arithmetic_operators): - result = a ** 1 - expected = pd.array([-1, 0, 1, None, 2], dtype="Int64") - tm.assert_extension_array_equal(result, expected) + op = all_arithmetic_operators + opa = getattr(data, op) - result = a ** pd.NA - expected = pd.array([None, None, 1, None, None], dtype="Int64") - tm.assert_extension_array_equal(result, expected) + # 2d -> return 
NotImplemented + result = opa(pd.DataFrame({"A": data})) + assert result is NotImplemented - result = a ** np.nan - expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64") - tm.assert_numpy_array_equal(result, expected) + msg = r"can only perform ops with 1-d structures" + with pytest.raises(NotImplementedError, match=msg): + opa(np.arange(len(data)).reshape(-1, len(data))) - # reversed - a = a[1:] # Can't raise integers to negative powers. - result = 0 ** a - expected = pd.array([1, 0, None, 0], dtype="Int64") - tm.assert_extension_array_equal(result, expected) +def test_error_len_mismatch(all_arithmetic_operators): + # operating with a list-like with non-matching length raises + op = get_op_from_name(all_arithmetic_operators) - result = 1 ** a - expected = pd.array([1, 1, 1, 1], dtype="Int64") - tm.assert_extension_array_equal(result, expected) + data = pd.array([1, 2, 3], dtype="Int64") - result = pd.NA ** a - expected = pd.array([1, None, None, None], dtype="Int64") - tm.assert_extension_array_equal(result, expected) + for other in [[1, 2], np.array([1.0, 2.0])]: + with pytest.raises(ValueError, match="Lengths must match"): + op(data, other) - result = np.nan ** a - expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64") - tm.assert_numpy_array_equal(result, expected) + s = pd.Series(data) + with pytest.raises(ValueError, match="Lengths must match"): + op(s, other) - def test_pow_array(self): - a = integer_array([0, 0, 0, 1, 1, 1, None, None, None]) - b = integer_array([0, 1, None, 0, 1, None, 0, 1, None]) - result = a ** b - expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None]) - tm.assert_extension_array_equal(result, expected) - def test_rpow_one_to_na(self): - # https://github.com/pandas-dev/pandas/issues/22022 - # https://github.com/pandas-dev/pandas/issues/29997 - arr = integer_array([np.nan, np.nan]) - result = np.array([1.0, 2.0]) ** arr - expected = np.array([1.0, np.nan]) - tm.assert_numpy_array_equal(result, 
expected) +# Various +# ----------------------------------------------------------------------------- + + +# TODO test unsigned overflow + + +def test_arith_coerce_scalar(data, all_arithmetic_operators): + op = get_op_from_name(all_arithmetic_operators) + s = pd.Series(data) + other = 0.01 + + result = op(s, other) + expected = op(s.astype(float), other) + # rfloordiv results in nan instead of inf + if all_arithmetic_operators == "__rfloordiv__": + expected[(expected == np.inf) | (expected == -np.inf)] = np.nan + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("other", [1.0, np.array(1.0)]) +def test_arithmetic_conversion(all_arithmetic_operators, other): + # if we have a float operand we should have a float result + # if that is equal to an integer + op = get_op_from_name(all_arithmetic_operators) + + s = pd.Series([1, 2, 3], dtype="Int64") + result = op(s, other) + assert result.dtype is np.dtype("float") def test_cross_type_arithmetic():
This is an attempt to make the arithmetic tests for the masked arrays more understandable and maintainable (doing it here for IntegerArray as a start, but the idea would be to do the same for BooleanArray, and later FloatingArray as well, and at that point also share some of those tests). The problem with the current `arrays/integer/test_arithmetic.py` is that is quite complex to see what is going on (which I experienced when making a version for FloatingArray in https://github.com/pandas-dev/pandas/pull/34307): for example, there is a huge `_check_op` method that has all the logic to created the expected result, but so this also has all the special cases of all ops combined, making it very difficult to see what is going on or to know what is now exactly tested for a certain op. The reason for this structure is that those tests originally came from the base extension tests in `tests/extension/`, where the tests needed to be very generic. However, we already moved out those tests for IntgerArray, since they were all customized (nothing was still being inherited from the base class), but so we also don't need to maintain the original class structure then. The logic how I constructed the new tests: - I first test each `op` with an explicitly constructed expected result. Here, we test the IntegerArray-specific special cases (like things as `1 ** pd.NA`, or division resulting in a float numpy array, ..). Explicitly writing it down makes it a lot easier to read and to see what is tested (no complex `if op == ...: ..,` constructs, and no complex correcting of expected results based on ndarray) - Those first tests are all with EAs. In a next set of tests, I added tests for array+scalar ops, array+ndarray ops, ... 
But here, I just rely on creating the expected result with the EA itself (since EA+EA ops were already tested before) - Similar tests for frame+scalar, series+scalar, series+array with the EA dtypes -> rely on the actual EA op for the expected result instead of having `_check_op` handle that as well. cc @dsaxton @jreback
https://api.github.com/repos/pandas-dev/pandas/pulls/34454
2020-05-29T11:53:24Z
2020-06-06T08:29:20Z
2020-06-06T08:29:20Z
2020-06-06T08:29:54Z
[ENH] Allow pad, backfill and cumcount in groupby.transform
diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst index 12b9f67ddb846..e3dfb552651a0 100644 --- a/doc/source/reference/frame.rst +++ b/doc/source/reference/frame.rst @@ -47,8 +47,6 @@ Conversion DataFrame.convert_dtypes DataFrame.infer_objects DataFrame.copy - DataFrame.isna - DataFrame.notna DataFrame.bool Indexing, iteration @@ -211,10 +209,18 @@ Missing data handling .. autosummary:: :toctree: api/ + DataFrame.backfill + DataFrame.bfill DataFrame.dropna + DataFrame.ffill DataFrame.fillna - DataFrame.replace DataFrame.interpolate + DataFrame.isna + DataFrame.isnull + DataFrame.notna + DataFrame.notnull + DataFrame.pad + DataFrame.replace Reshaping, sorting, transposing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/reference/groupby.rst b/doc/source/reference/groupby.rst index ca444dac9d77d..5f6bef2579d27 100644 --- a/doc/source/reference/groupby.rst +++ b/doc/source/reference/groupby.rst @@ -50,6 +50,7 @@ Computations / descriptive stats GroupBy.all GroupBy.any GroupBy.bfill + GroupBy.backfill GroupBy.count GroupBy.cumcount GroupBy.cummax @@ -67,6 +68,7 @@ Computations / descriptive stats GroupBy.ngroup GroupBy.nth GroupBy.ohlc + GroupBy.pad GroupBy.prod GroupBy.rank GroupBy.pct_change @@ -88,10 +90,12 @@ application to columns of a specific data type. DataFrameGroupBy.all DataFrameGroupBy.any + DataFrameGroupBy.backfill DataFrameGroupBy.bfill DataFrameGroupBy.corr DataFrameGroupBy.count DataFrameGroupBy.cov + DataFrameGroupBy.cumcount DataFrameGroupBy.cummax DataFrameGroupBy.cummin DataFrameGroupBy.cumprod @@ -106,6 +110,7 @@ application to columns of a specific data type. 
DataFrameGroupBy.idxmin DataFrameGroupBy.mad DataFrameGroupBy.nunique + DataFrameGroupBy.pad DataFrameGroupBy.pct_change DataFrameGroupBy.plot DataFrameGroupBy.quantile diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst index 797ade9594c7d..3b595ba5ab206 100644 --- a/doc/source/reference/series.rst +++ b/doc/source/reference/series.rst @@ -214,11 +214,18 @@ Missing data handling .. autosummary:: :toctree: api/ - Series.isna - Series.notna + Series.backfill + Series.bfill Series.dropna + Series.ffill Series.fillna Series.interpolate + Series.isna + Series.isnull + Series.notna + Series.notnull + Series.pad + Series.replace Reshaping, sorting ------------------ diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 88bf0e005a221..59e7c466dab96 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -288,6 +288,7 @@ Other enhancements - :meth:`HDFStore.put` now accepts `track_times` parameter. Parameter is passed to ``create_table`` method of ``PyTables`` (:issue:`32682`). - Make :class:`pandas.core.window.Rolling` and :class:`pandas.core.window.Expanding` iterable(:issue:`11704`) - Make ``option_context`` a :class:`contextlib.ContextDecorator`, which allows it to be used as a decorator over an entire function (:issue:`34253`). +- :meth:`groupby.transform` now allows ``func`` to be ``pad``, ``backfill`` and ``cumcount`` (:issue:`31269`). .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0260f30b9e7e2..ee7dfd4c25a89 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6193,6 +6193,8 @@ def ffill( method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) + pad = ffill + def bfill( self: FrameOrSeries, axis=None, @@ -6212,6 +6214,8 @@ def bfill( method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) + backfill = bfill + @doc(klass=_shared_doc_kwargs["klass"]) def replace( self, diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index ea4b6f4e65341..e47cfddeeaad7 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -483,6 +483,8 @@ def transform(self, func, *args, engine="cython", engine_kwargs=None, **kwargs): elif func in base.cythonized_kernels: # cythonized transform or canned "agg+broadcast" return getattr(self, func)(*args, **kwargs) + elif func in base.transformation_kernels: + return getattr(self, func)(*args, **kwargs) # If func is a reduction, we need to broadcast the # result to the whole group. 
Compute func result @@ -1464,6 +1466,8 @@ def transform(self, func, *args, engine="cython", engine_kwargs=None, **kwargs): elif func in base.cythonized_kernels: # cythonized transformation or canned "reduction+broadcast" return getattr(self, func)(*args, **kwargs) + elif func in base.transformation_kernels: + return getattr(self, func)(*args, **kwargs) # GH 30918 # Use _transform_fast only when we know func is an aggregation diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index b3347b3c64e6c..e7bc3801a08a7 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -323,15 +323,22 @@ def test_transform_transformation_func(transformation_func): { "A": ["foo", "foo", "foo", "foo", "bar", "bar", "baz"], "B": [1, 2, np.nan, 3, 3, np.nan, 4], - } + }, + index=pd.date_range("2020-01-01", "2020-01-07"), ) - if transformation_func in ["pad", "backfill", "tshift", "cumcount"]: - # These transformation functions are not yet covered in this test - pytest.xfail("See GH 31269") + if transformation_func == "cumcount": + test_op = lambda x: x.transform("cumcount") + mock_op = lambda x: Series(range(len(x)), x.index) elif transformation_func == "fillna": test_op = lambda x: x.transform("fillna", value=0) mock_op = lambda x: x.fillna(value=0) + elif transformation_func == "tshift": + msg = ( + "Current behavior of groupby.tshift is inconsistent with other " + "transformations. 
See GH34452 for more details" + ) + pytest.xfail(msg) else: test_op = lambda x: x.transform(transformation_func) mock_op = lambda x: getattr(x, transformation_func)() @@ -340,7 +347,10 @@ def test_transform_transformation_func(transformation_func): groups = [df[["B"]].iloc[:4], df[["B"]].iloc[4:6], df[["B"]].iloc[6:]] expected = concat([mock_op(g) for g in groups]) - tm.assert_frame_equal(result, expected) + if transformation_func == "cumcount": + tm.assert_series_equal(result, expected) + else: + tm.assert_frame_equal(result, expected) def test_transform_select_columns(df):
- [x] closes #31269 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34453
2020-05-29T10:32:25Z
2020-06-03T23:28:12Z
2020-06-03T23:28:11Z
2020-06-04T04:06:48Z
Fix MultiIndex .loc "Too Many Indexers" with None as return value
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 5ef1f9dea5091..9c00bc5ea7434 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -811,6 +811,7 @@ Indexing - Bug in :meth:`DataFrame.truncate` and :meth:`Series.truncate` where index was assumed to be monotone increasing (:issue:`33756`) - Indexing with a list of strings representing datetimes failed on :class:`DatetimeIndex` or :class:`PeriodIndex`(:issue:`11278`) - Bug in :meth:`Series.at` when used with a :class:`MultiIndex` would raise an exception on valid inputs (:issue:`26989`) +- Bug in :meth:`Series.loc` when used with a :class:`MultiIndex` would raise an IndexingError when accessing a None value (:issue:`34318`) Missing ^^^^^^^ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 3a146bb0438c5..ee0e9d32c4601 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -766,9 +766,11 @@ def _getitem_lowerdim(self, tup: Tuple): # ...but iloc should handle the tuple as simple integer-location # instead of checking it as multiindex representation (GH 13797) if isinstance(ax0, ABCMultiIndex) and self.name != "iloc": - result = self._handle_lowerdim_multi_index_axis0(tup) - if result is not None: + try: + result = self._handle_lowerdim_multi_index_axis0(tup) return result + except IndexingError: + pass if len(tup) > self.ndim: raise IndexingError("Too many indexers. 
handle elsewhere") @@ -816,9 +818,11 @@ def _getitem_nested_tuple(self, tup: Tuple): if self.name != "loc": # This should never be reached, but lets be explicit about it raise ValueError("Too many indices") - result = self._handle_lowerdim_multi_index_axis0(tup) - if result is not None: + try: + result = self._handle_lowerdim_multi_index_axis0(tup) return result + except IndexingError: + pass # this is a series with a multi-index specified a tuple of # selectors @@ -1065,7 +1069,7 @@ def _handle_lowerdim_multi_index_axis0(self, tup: Tuple): if len(tup) <= self.obj.index.nlevels and len(tup) > self.ndim: raise ek - return None + raise IndexingError("No label returned") def _getitem_axis(self, key, axis: int): key = item_from_zerodim(key) diff --git a/pandas/tests/series/indexing/test_multiindex.py b/pandas/tests/series/indexing/test_multiindex.py new file mode 100644 index 0000000000000..e98a32d62b767 --- /dev/null +++ b/pandas/tests/series/indexing/test_multiindex.py @@ -0,0 +1,22 @@ +""" test get/set & misc """ + + +import pandas as pd +from pandas import MultiIndex, Series + + +def test_access_none_value_in_multiindex(): + # GH34318: test that you can access a None value using .loc through a Multiindex + + s = Series([None], pd.MultiIndex.from_arrays([["Level1"], ["Level2"]])) + result = s.loc[("Level1", "Level2")] + assert result is None + + midx = MultiIndex.from_product([["Level1"], ["Level2_a", "Level2_b"]]) + s = Series([None] * len(midx), dtype=object, index=midx) + result = s.loc[("Level1", "Level2_a")] + assert result is None + + s = Series([1] * len(midx), dtype=object, index=midx) + result = s.loc[("Level1", "Level2_a")] + assert result == 1
- [x] closes #34318 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Three changes in indexing.py : - function _handle_lowerdim_multi_index_axis0 default return value was None, now it raises an Exception. - function _getitem_lowerdim checks for a raised exception instead of a value None. - function _getitem_nested_tuple checks for a raised exception instead of a value None(had to be changed because it also uses _handle_lowerdim_multi_index_axis0)
https://api.github.com/repos/pandas-dev/pandas/pulls/34450
2020-05-29T02:21:59Z
2020-06-03T18:02:23Z
2020-06-03T18:02:23Z
2020-06-03T18:02:27Z
BUG: Period.to_timestamp not matching PeriodArray.to_timestamp
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index b2b7eb000a2f3..bc190825214c1 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1702,10 +1702,7 @@ cdef class _Period: @property def end_time(self) -> Timestamp: - # freq.n can't be negative or 0 - # ordinal = (self + self.freq.n).start_time.value - 1 - ordinal = (self + self.freq).start_time.value - 1 - return Timestamp(ordinal) + return self.to_timestamp(how="end") def to_timestamp(self, freq=None, how='start', tz=None) -> Timestamp: """ @@ -1727,18 +1724,22 @@ cdef class _Period: ------- Timestamp """ - if freq is not None: - freq = self._maybe_convert_freq(freq) how = validate_end_alias(how) end = how == 'E' if end: + if freq == "B" or self.freq == "B": + # roll forward to ensure we land on B date + adjust = Timedelta(1, "D") - Timedelta(1, "ns") + return self.to_timestamp(how="start") + adjust endpoint = (self + self.freq).to_timestamp(how='start') return endpoint - Timedelta(1, 'ns') if freq is None: base, mult = get_freq_code(self.freq) freq = get_to_timestamp_base(base) + else: + freq = self._maybe_convert_freq(freq) base, mult = get_freq_code(freq) val = self.asfreq(freq, how) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 4601e7fa5389e..3d4b42de01810 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -430,7 +430,7 @@ def to_timestamp(self, freq=None, how="start"): end = how == "E" if end: - if freq == "B": + if freq == "B" or self.freq == "B": # roll forward to ensure we land on B date adjust = Timedelta(1, "D") - Timedelta(1, "ns") return self.to_timestamp(how="start") + adjust diff --git a/pandas/tests/indexes/period/test_scalar_compat.py b/pandas/tests/indexes/period/test_scalar_compat.py index 0f92b7a4e168b..e9d17e7e20778 100644 --- a/pandas/tests/indexes/period/test_scalar_compat.py +++ b/pandas/tests/indexes/period/test_scalar_compat.py @@ -17,3 +17,12 @@ def 
test_end_time(self): expected_index = date_range("2016-01-01", end="2016-05-31", freq="M") expected_index += Timedelta(1, "D") - Timedelta(1, "ns") tm.assert_index_equal(index.end_time, expected_index) + + def test_end_time_business_friday(self): + # GH#34449 + pi = period_range("1990-01-05", freq="B", periods=1) + result = pi.end_time + + dti = date_range("1990-01-05", freq="D", periods=1)._with_freq(None) + expected = dti + Timedelta(days=1, nanoseconds=-1) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index e81f2ee55eebd..41909b4b1a9bb 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -589,6 +589,8 @@ def test_to_timestamp(self): from_lst = ["A", "Q", "M", "W", "B", "D", "H", "Min", "S"] def _ex(p): + if p.freq == "B": + return p.start_time + Timedelta(days=1, nanoseconds=-1) return Timestamp((p + p.freq).start_time.value - 1) for i, fcode in enumerate(from_lst): @@ -632,6 +634,13 @@ def _ex(p): result = p.to_timestamp("5S", how="start") assert result == expected + def test_to_timestamp_business_end(self): + per = pd.Period("1990-01-05", "B") # Friday + result = per.to_timestamp("B", how="E") + + expected = pd.Timestamp("1990-01-06") - pd.Timedelta(nanoseconds=1) + assert result == expected + # -------------------------------------------------------------- # Rendering: __repr__, strftime, etc @@ -786,6 +795,14 @@ def _ex(*args): xp = _ex(2012, 1, 2, 1) assert xp == p.end_time + def test_end_time_business_friday(self): + # GH#34449 + per = Period("1990-01-05", "B") + result = per.end_time + + expected = pd.Timestamp("1990-01-06") - pd.Timedelta(nanoseconds=1) + assert result == expected + def test_anchor_week_end_time(self): def _ex(*args): return Timestamp(Timestamp(datetime(*args)).value - 1)
- [ ] closes #xxxx - [x] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry With this change, I _think_ we can move to share some methods between Period and PeriodArray.
https://api.github.com/repos/pandas-dev/pandas/pulls/34449
2020-05-29T01:39:17Z
2020-06-01T15:40:17Z
2020-06-01T15:40:17Z
2020-06-01T16:34:25Z
BUG: ensure_timedelta64ns overflows
diff --git a/doc/source/reference/general_utility_functions.rst b/doc/source/reference/general_utility_functions.rst index 72a84217323ab..c1759110b94ad 100644 --- a/doc/source/reference/general_utility_functions.rst +++ b/doc/source/reference/general_utility_functions.rst @@ -43,6 +43,7 @@ Exceptions and warnings errors.NullFrequencyError errors.NumbaUtilError errors.OutOfBoundsDatetime + errors.OutOfBoundsTimedelta errors.ParserError errors.ParserWarning errors.PerformanceWarning diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 0ae4cc97d07e3..7723140e3eab1 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -7,6 +7,7 @@ "nat_strings", "is_null_datetimelike", "OutOfBoundsDatetime", + "OutOfBoundsTimedelta", "IncompatibleFrequency", "Period", "Resolution", @@ -26,7 +27,7 @@ ] from . import dtypes -from .conversion import localize_pydatetime +from .conversion import OutOfBoundsTimedelta, localize_pydatetime from .dtypes import Resolution from .nattype import NaT, NaTType, iNaT, is_null_datetimelike, nat_strings from .np_datetime import OutOfBoundsDatetime diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 31d2d0e9572f5..85da7a60a029a 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -51,6 +51,15 @@ DT64NS_DTYPE = np.dtype('M8[ns]') TD64NS_DTYPE = np.dtype('m8[ns]') +class OutOfBoundsTimedelta(ValueError): + """ + Raised when encountering a timedelta value that cannot be represented + as a timedelta64[ns]. 
+ """ + # Timedelta analogue to OutOfBoundsDatetime + pass + + # ---------------------------------------------------------------------- # Unit Conversion Helpers @@ -228,11 +237,34 @@ def ensure_timedelta64ns(arr: ndarray, copy: bool=True): Returns ------- - result : ndarray with dtype timedelta64[ns] - + ndarray[timedelta64[ns]] """ - return arr.astype(TD64NS_DTYPE, copy=copy) - # TODO: check for overflows when going from a lower-resolution to nanos + assert arr.dtype.kind == "m", arr.dtype + + if arr.dtype == TD64NS_DTYPE: + return arr.copy() if copy else arr + + # Re-use the datetime64 machinery to do an overflow-safe `astype` + dtype = arr.dtype.str.replace("m8", "M8") + dummy = arr.view(dtype) + try: + dt64_result = ensure_datetime64ns(dummy, copy) + except OutOfBoundsDatetime as err: + # Re-write the exception in terms of timedelta64 instead of dt64 + + # Find the value that we are going to report as causing an overflow + tdmin = arr.min() + tdmax = arr.max() + if np.abs(tdmin) >= np.abs(tdmax): + bad_val = tdmin + else: + bad_val = tdmax + + raise OutOfBoundsTimedelta( + f"Out of bounds for nanosecond {arr.dtype.name} {bad_val}" + ) + + return dt64_result.view(TD64NS_DTYPE) # ---------------------------------------------------------------------- diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index d8779dae7c384..6a4b3318d3aa7 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2302,7 +2302,8 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): def __init__(self, values, placement, ndim=None): if values.dtype != TD64NS_DTYPE: - values = conversion.ensure_timedelta64ns(values) + # e.g. 
non-nano or int64 + values = TimedeltaArray._from_sequence(values)._data if isinstance(values, TimedeltaArray): values = values._data assert isinstance(values, np.ndarray), type(values) diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index e3427d93f3d84..6ac3004d29996 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -6,7 +6,7 @@ from pandas._config.config import OptionError -from pandas._libs.tslibs import OutOfBoundsDatetime +from pandas._libs.tslibs import OutOfBoundsDatetime, OutOfBoundsTimedelta class NullFrequencyError(ValueError): diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index 036037032031a..eca444c9ceb34 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py @@ -32,6 +32,7 @@ def test_namespace(): "is_null_datetimelike", "nat_strings", "OutOfBoundsDatetime", + "OutOfBoundsTimedelta", "Period", "IncompatibleFrequency", "Resolution", diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py index b35940c6bb95b..4f184b78f34a1 100644 --- a/pandas/tests/tslibs/test_conversion.py +++ b/pandas/tests/tslibs/test_conversion.py @@ -4,7 +4,13 @@ import pytest from pytz import UTC -from pandas._libs.tslibs import conversion, iNaT, timezones, tzconversion +from pandas._libs.tslibs import ( + OutOfBoundsTimedelta, + conversion, + iNaT, + timezones, + tzconversion, +) from pandas import Timestamp, date_range import pandas._testing as tm @@ -89,6 +95,13 @@ def test_ensure_datetime64ns_bigendian(): tm.assert_numpy_array_equal(result, expected) +def test_ensure_timedelta64ns_overflows(): + arr = np.arange(10).astype("m8[Y]") * 100 + msg = r"Out of bounds for nanosecond timedelta64\[Y\] 900" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + conversion.ensure_timedelta64ns(arr) + + class SubDatetime(datetime): pass
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34448
2020-05-29T00:50:28Z
2020-07-10T22:19:55Z
2020-07-10T22:19:55Z
2020-07-10T23:36:58Z
REF: move offset_to_period_map from liboffsets
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 30a1490fdf862..d3baeba72e81e 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -30,7 +30,7 @@ from pandas._libs.tslibs.util cimport is_integer_object, is_datetime64_object from pandas._libs.tslibs.base cimport ABCTimestamp from pandas._libs.tslibs.ccalendar import ( - MONTHS, DAYS, MONTH_ALIASES, MONTH_TO_CAL_NUM, weekday_to_int, int_to_weekday, + MONTH_ALIASES, MONTH_TO_CAL_NUM, weekday_to_int, int_to_weekday, ) from pandas._libs.tslibs.ccalendar cimport get_days_in_month, dayofweek from pandas._libs.tslibs.conversion cimport ( @@ -45,53 +45,6 @@ from pandas._libs.tslibs.tzconversion cimport tz_convert_single from .timedeltas cimport delta_to_nanoseconds -# --------------------------------------------------------------------- -# Constants - -_offset_to_period_map = { - 'WEEKDAY': 'D', - 'EOM': 'M', - 'BM': 'M', - 'BQS': 'Q', - 'QS': 'Q', - 'BQ': 'Q', - 'BA': 'A', - 'AS': 'A', - 'BAS': 'A', - 'MS': 'M', - 'D': 'D', - 'C': 'C', - 'B': 'B', - 'T': 'T', - 'S': 'S', - 'L': 'L', - 'U': 'U', - 'N': 'N', - 'H': 'H', - 'Q': 'Q', - 'A': 'A', - 'W': 'W', - 'M': 'M', - 'Y': 'A', - 'BY': 'A', - 'YS': 'A', - 'BYS': 'A'} - -need_suffix = ['QS', 'BQ', 'BQS', 'YS', 'AS', 'BY', 'BA', 'BYS', 'BAS'] - -for __prefix in need_suffix: - for _m in MONTHS: - key = f'{__prefix}-{_m}' - _offset_to_period_map[key] = _offset_to_period_map[__prefix] - -for __prefix in ['A', 'Q']: - for _m in MONTHS: - _alias = f'{__prefix}-{_m}' - _offset_to_period_map[_alias] = _alias - -for _d in DAYS: - _offset_to_period_map[f'W-{_d}'] = f'W-{_d}' - # --------------------------------------------------------------------- # Misc Helpers diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 47ae66ac4f91b..7516d9748c18f 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -5,13 +5,18 @@ from pandas._libs.algos import unique_deltas from 
pandas._libs.tslibs import Timestamp -from pandas._libs.tslibs.ccalendar import MONTH_ALIASES, MONTH_NUMBERS, int_to_weekday +from pandas._libs.tslibs.ccalendar import ( + DAYS, + MONTH_ALIASES, + MONTH_NUMBERS, + MONTHS, + int_to_weekday, +) from pandas._libs.tslibs.fields import build_field_sarray from pandas._libs.tslibs.offsets import ( # noqa:F401 DateOffset, Day, _get_offset, - _offset_to_period_map, to_offset, ) from pandas._libs.tslibs.parsing import get_rule_month @@ -39,6 +44,51 @@ # --------------------------------------------------------------------- # Offset names ("time rules") and related functions +_offset_to_period_map = { + "WEEKDAY": "D", + "EOM": "M", + "BM": "M", + "BQS": "Q", + "QS": "Q", + "BQ": "Q", + "BA": "A", + "AS": "A", + "BAS": "A", + "MS": "M", + "D": "D", + "C": "C", + "B": "B", + "T": "T", + "S": "S", + "L": "L", + "U": "U", + "N": "N", + "H": "H", + "Q": "Q", + "A": "A", + "W": "W", + "M": "M", + "Y": "A", + "BY": "A", + "YS": "A", + "BYS": "A", +} + +_need_suffix = ["QS", "BQ", "BQS", "YS", "AS", "BY", "BA", "BYS", "BAS"] + +for _prefix in _need_suffix: + for _m in MONTHS: + key = f"{_prefix}-{_m}" + _offset_to_period_map[key] = _offset_to_period_map[_prefix] + +for _prefix in ["A", "Q"]: + for _m in MONTHS: + _alias = f"{_prefix}-{_m}" + _offset_to_period_map[_alias] = _alias + +for _d in DAYS: + _offset_to_period_map[f"W-{_d}"] = f"W-{_d}" + def get_period_alias(offset_str: str) -> Optional[str]: """
its only used in tseries.frequencies, so this puts it back there
https://api.github.com/repos/pandas-dev/pandas/pulls/34447
2020-05-28T23:43:06Z
2020-05-29T01:00:40Z
2020-05-29T01:00:40Z
2020-05-29T01:10:38Z
CLN: liboffsets de-duplicate pickle code
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index a27b0903e9d75..b804ed883e693 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -63,7 +63,7 @@ cdef datetime _as_datetime(datetime obj): return obj -cpdef bint is_normalized(datetime dt): +cdef bint _is_normalized(datetime dt): if dt.hour != 0 or dt.minute != 0 or dt.second != 0 or dt.microsecond != 0: # Regardless of whether dt is datetime vs Timestamp return False @@ -233,7 +233,7 @@ cpdef int get_firstbday(int year, int month) nogil: return first -def _get_calendar(weekmask, holidays, calendar): +cdef _get_calendar(weekmask, holidays, calendar): """Generate busdaycalendar""" if isinstance(calendar, np.busdaycalendar): if not holidays: @@ -252,7 +252,7 @@ def _get_calendar(weekmask, holidays, calendar): holidays = holidays + calendar.holidays().tolist() except AttributeError: pass - holidays = [to_dt64D(dt) for dt in holidays] + holidays = [_to_dt64D(dt) for dt in holidays] holidays = tuple(sorted(holidays)) kwargs = {'weekmask': weekmask} @@ -263,7 +263,7 @@ def _get_calendar(weekmask, holidays, calendar): return busdaycalendar, holidays -def to_dt64D(dt): +cdef _to_dt64D(dt): # Currently # > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]') # numpy.datetime64('2013-05-01T02:00:00.000000+0200') @@ -286,7 +286,7 @@ def to_dt64D(dt): # Validation -def _validate_business_time(t_input): +cdef _validate_business_time(t_input): if isinstance(t_input, str): try: t = time.strptime(t_input, '%H:%M') @@ -311,7 +311,7 @@ _relativedelta_kwds = {"years", "months", "weeks", "days", "year", "month", "minutes", "seconds", "microseconds"} -def _determine_offset(kwds): +cdef _determine_offset(kwds): # timedelta is used for sub-daily plural offsets and all singular # offsets relativedelta is used for plural offsets of daily length or # more nanosecond(s) are handled by apply_wraps @@ -357,7 +357,7 @@ cdef class BaseOffset: """ _typ = "dateoffset" 
_day_opt = None - _attributes = frozenset(['n', 'normalize']) + _attributes = tuple(["n", "normalize"]) _use_relativedelta = False _adjust_dst = True _deprecations = frozenset(["isAnchored", "onOffset"]) @@ -400,7 +400,6 @@ cdef class BaseOffset: Returns a tuple containing all of the attributes needed to evaluate equality between two DateOffset objects. """ - # NB: non-cython subclasses override property with cache_readonly d = getattr(self, "__dict__", {}) all_paras = d.copy() all_paras["n"] = self.n @@ -614,7 +613,7 @@ cdef class BaseOffset: return get_day_of_month(other, self._day_opt) def is_on_offset(self, dt) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False # Default (slow) method for determining if some date is a member of the @@ -658,63 +657,22 @@ cdef class BaseOffset: def __setstate__(self, state): """Reconstruct an instance from a pickled state""" - if isinstance(self, MonthOffset): - # We can't just override MonthOffset.__setstate__ because of the - # combination of MRO resolution and cython not handling - # multiple inheritance nicely for cdef classes. 
- state.pop("_use_relativedelta", False) - state.pop("offset", None) - state.pop("_offset", None) - state.pop("kwds", {}) - - if 'offset' in state: - # Older (<0.22.0) versions have offset attribute instead of _offset - if '_offset' in state: # pragma: no cover - raise AssertionError('Unexpected key `_offset`') - state['_offset'] = state.pop('offset') - state['kwds']['offset'] = state['_offset'] - - if '_offset' in state and not isinstance(state['_offset'], timedelta): - # relativedelta, we need to populate using its kwds - offset = state['_offset'] - odict = offset.__dict__ - kwds = {key: odict[key] for key in odict if odict[key]} - state.update(kwds) - self.n = state.pop("n") self.normalize = state.pop("normalize") self._cache = state.pop("_cache", {}) - - if not len(state): - # FIXME: kludge because some classes no longer have a __dict__, - # so we need to short-circuit before raising on the next line - return - - self.__dict__.update(state) - - if 'weekmask' in state and 'holidays' in state: - weekmask = state.pop("weekmask") - holidays = state.pop("holidays") - calendar, holidays = _get_calendar(weekmask=weekmask, - holidays=holidays, - calendar=None) - self.calendar = calendar - self.holidays = holidays + # At this point we expect state to be empty def __getstate__(self): """Return a pickleable state""" - state = getattr(self, "__dict__", {}).copy() + state = {} state["n"] = self.n state["normalize"] = self.normalize # we don't want to actually pickle the calendar object # as its a np.busyday; we recreate on deserialization - if 'calendar' in state: - del state['calendar'] - try: - state['kwds'].pop('calendar') - except KeyError: - pass + state.pop("calendar", None) + if "kwds" in state: + state["kwds"].pop("calendar", None) return state @@ -752,6 +710,17 @@ cdef class SingleConstructorOffset(BaseOffset): raise ValueError(f"Bad freq suffix {suffix}") return cls() + def __reduce__(self): + # This __reduce__ implementation is for all BaseOffset subclasses + # 
except for RelativeDeltaOffset + # np.busdaycalendar objects do not pickle nicely, but we can reconstruct + # from attributes that do get pickled. + tup = tuple( + getattr(self, attr) if attr != "calendar" else None + for attr in self._attributes + ) + return type(self), tup + # --------------------------------------------------------------------- # Tick Offsets @@ -761,7 +730,7 @@ cdef class Tick(SingleConstructorOffset): __array_priority__ = 1000 _adjust_dst = False _prefix = "undefined" - _attributes = frozenset(["n", "normalize"]) + _attributes = tuple(["n", "normalize"]) def __init__(self, n=1, normalize=False): n = self._validate_n(n) @@ -883,9 +852,6 @@ cdef class Tick(SingleConstructorOffset): # -------------------------------------------------------------------- # Pickle Methods - def __reduce__(self): - return (type(self), (self.n,)) - def __setstate__(self, state): self.n = state["n"] self.normalize = False @@ -955,7 +921,7 @@ cdef class RelativeDeltaOffset(BaseOffset): """ DateOffset subclass backed by a dateutil relativedelta object. 
""" - _attributes = frozenset(["n", "normalize"] + list(_relativedelta_kwds)) + _attributes = tuple(["n", "normalize"] + list(_relativedelta_kwds)) _adjust_dst = False def __init__(self, n=1, normalize=False, **kwds): @@ -968,6 +934,38 @@ cdef class RelativeDeltaOffset(BaseOffset): val = kwds[key] object.__setattr__(self, key, val) + def __getstate__(self): + """Return a pickleable state""" + # RelativeDeltaOffset (technically DateOffset) is the only non-cdef + # class, so the only one with __dict__ + state = self.__dict__.copy() + state["n"] = self.n + state["normalize"] = self.normalize + return state + + def __setstate__(self, state): + """Reconstruct an instance from a pickled state""" + + if "offset" in state: + # Older (<0.22.0) versions have offset attribute instead of _offset + if "_offset" in state: # pragma: no cover + raise AssertionError("Unexpected key `_offset`") + state["_offset"] = state.pop("offset") + state["kwds"]["offset"] = state["_offset"] + + if "_offset" in state and not isinstance(state["_offset"], timedelta): + # relativedelta, we need to populate using its kwds + offset = state["_offset"] + odict = offset.__dict__ + kwds = {key: odict[key] for key in odict if odict[key]} + state.update(kwds) + + self.n = state.pop("n") + self.normalize = state.pop("normalize") + self._cache = state.pop("_cache", {}) + + self.__dict__.update(state) + @apply_wraps def apply(self, other): if self._use_relativedelta: @@ -1060,7 +1058,7 @@ cdef class RelativeDeltaOffset(BaseOffset): ) def is_on_offset(self, dt) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False # TODO: see GH#1395 return True @@ -1234,6 +1232,7 @@ cdef class BusinessMixin(SingleConstructorOffset): if "_offset" in state: self._offset = state.pop("_offset") elif "offset" in state: + # Older (<0.22.0) versions have offset attribute instead of _offset self._offset = state.pop("offset") if self._prefix.startswith("C"): @@ -1256,7 
+1255,7 @@ cdef class BusinessDay(BusinessMixin): """ _prefix = "B" - _attributes = frozenset(["n", "normalize", "offset"]) + _attributes = tuple(["n", "normalize", "offset"]) cpdef __setstate__(self, state): self.n = state.pop("n") @@ -1266,10 +1265,6 @@ cdef class BusinessDay(BusinessMixin): elif "offset" in state: self._offset = state.pop("offset") - def __reduce__(self): - tup = (self.n, self.normalize, self.offset) - return type(self), tup - @property def _params(self): # FIXME: using cache_readonly breaks a pytables test @@ -1371,7 +1366,7 @@ cdef class BusinessDay(BusinessMixin): return result def is_on_offset(self, dt) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False return dt.weekday() < 5 @@ -1383,7 +1378,7 @@ cdef class BusinessHour(BusinessMixin): _prefix = "BH" _anchor = 0 - _attributes = frozenset(["n", "normalize", "start", "end", "offset"]) + _attributes = tuple(["n", "normalize", "start", "end", "offset"]) _adjust_dst = False cdef readonly: @@ -1450,9 +1445,6 @@ cdef class BusinessHour(BusinessMixin): state.pop("next_bday", None) BusinessMixin.__setstate__(self, state) - def __reduce__(self): - return type(self), (self.n, self.normalize, self.start, self.end, self.offset) - def _repr_attrs(self) -> str: out = super()._repr_attrs() hours = ",".join( @@ -1505,7 +1497,6 @@ cdef class BusinessHour(BusinessMixin): nb_offset = -1 if self._prefix.startswith("C"): # CustomBusinessHour - from pandas.tseries.offsets import CustomBusinessDay return CustomBusinessDay( n=nb_offset, weekmask=self.weekmask, @@ -1662,7 +1653,6 @@ cdef class BusinessHour(BusinessMixin): if bd != 0: if self._prefix.startswith("C"): # GH#30593 this is a Custom offset - from pandas.tseries.offsets import CustomBusinessDay skip_bd = CustomBusinessDay( n=bd, weekmask=self.weekmask, @@ -1722,7 +1712,7 @@ cdef class BusinessHour(BusinessMixin): raise ApplyTypeError("Only know how to combine business hour with datetime") 
def is_on_offset(self, dt): - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False if dt.tzinfo is not None: @@ -1737,7 +1727,7 @@ cdef class BusinessHour(BusinessMixin): """ Slight speedups using calculated values. """ - # if self.normalize and not is_normalized(dt): + # if self.normalize and not _is_normalized(dt): # return False # Valid BH can be on the different BusinessDay during midnight # Distinguish by the time spent from previous opening time @@ -1786,7 +1776,7 @@ cdef class WeekOfMonthMixin(SingleConstructorOffset): return shift_day(shifted, to_day - shifted.day) def is_on_offset(self, dt) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False return dt.day == self._get_offset_day(dt) @@ -1806,7 +1796,7 @@ cdef class YearOffset(SingleConstructorOffset): """ DateOffset that just needs a month. """ - _attributes = frozenset(["n", "normalize", "month"]) + _attributes = tuple(["n", "normalize", "month"]) # _default_month: int # FIXME: python annotation here breaks things @@ -1828,9 +1818,6 @@ cdef class YearOffset(SingleConstructorOffset): self.normalize = state.pop("normalize") self._cache = {} - def __reduce__(self): - return type(self), (self.n, self.normalize, self.month) - @classmethod def _from_name(cls, suffix=None): kwargs = {} @@ -1844,7 +1831,7 @@ cdef class YearOffset(SingleConstructorOffset): return f"{self._prefix}-{month}" def is_on_offset(self, dt) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False return dt.month == self.month and dt.day == self._get_offset_day(dt) @@ -1943,7 +1930,7 @@ cdef class YearBegin(YearOffset): # Quarter-Based Offset Classes cdef class QuarterOffset(SingleConstructorOffset): - _attributes = frozenset(["n", "normalize", "startingMonth"]) + _attributes = tuple(["n", "normalize", "startingMonth"]) # TODO: Consider combining QuarterOffset and 
YearOffset __init__ at some # point. Also apply_index, is_on_offset, rule_code if # startingMonth vs month attr names are resolved @@ -1967,9 +1954,6 @@ cdef class QuarterOffset(SingleConstructorOffset): self.n = state.pop("n") self.normalize = state.pop("normalize") - def __reduce__(self): - return type(self), (self.n, self.normalize, self.startingMonth) - @classmethod def _from_name(cls, suffix=None): kwargs = {} @@ -1989,7 +1973,7 @@ cdef class QuarterOffset(SingleConstructorOffset): return self.n == 1 and self.startingMonth is not None def is_on_offset(self, dt) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False mod_month = (dt.month - self.startingMonth) % 3 return mod_month == 0 and dt.day == self._get_offset_day(dt) @@ -2106,7 +2090,7 @@ cdef class QuarterBegin(QuarterOffset): cdef class MonthOffset(SingleConstructorOffset): def is_on_offset(self, dt) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False return dt.day == self._get_offset_day(dt) @@ -2121,6 +2105,14 @@ cdef class MonthOffset(SingleConstructorOffset): shifted = shift_months(dtindex.asi8, self.n, self._day_opt) return type(dtindex)._simple_new(shifted, dtype=dtindex.dtype) + cpdef __setstate__(self, state): + state.pop("_use_relativedelta", False) + state.pop("offset", None) + state.pop("_offset", None) + state.pop("kwds", {}) + + BaseOffset.__setstate__(self, state) + cdef class MonthEnd(MonthOffset): """ @@ -2182,7 +2174,7 @@ cdef class BusinessMonthBegin(MonthOffset): cdef class SemiMonthOffset(SingleConstructorOffset): _default_day_of_month = 15 _min_day_of_month = 2 - _attributes = frozenset(["n", "normalize", "day_of_month"]) + _attributes = tuple(["n", "normalize", "day_of_month"]) cdef readonly: int day_of_month @@ -2201,9 +2193,6 @@ cdef class SemiMonthOffset(SingleConstructorOffset): f"got {self.day_of_month}" ) - def __reduce__(self): - return type(self), 
(self.n, self.normalize, self.day_of_month) - cpdef __setstate__(self, state): self.n = state.pop("n") self.normalize = state.pop("normalize") @@ -2310,7 +2299,7 @@ cdef class SemiMonthEnd(SemiMonthOffset): _min_day_of_month = 1 def is_on_offset(self, dt) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False days_in_month = get_days_in_month(dt.year, dt.month) return dt.day in (self.day_of_month, days_in_month) @@ -2370,7 +2359,7 @@ cdef class SemiMonthBegin(SemiMonthOffset): _prefix = "SMS" def is_on_offset(self, dt) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False return dt.day in (1, self.day_of_month) @@ -2428,7 +2417,7 @@ cdef class Week(SingleConstructorOffset): _inc = timedelta(weeks=1) _prefix = "W" - _attributes = frozenset(["n", "normalize", "weekday"]) + _attributes = tuple(["n", "normalize", "weekday"]) cdef readonly: object weekday # int or None @@ -2441,9 +2430,6 @@ cdef class Week(SingleConstructorOffset): if self.weekday < 0 or self.weekday > 6: raise ValueError(f"Day must be 0<=day<=6, got {self.weekday}") - def __reduce__(self): - return type(self), (self.n, self.normalize, self.weekday) - cpdef __setstate__(self, state): self.n = state.pop("n") self.normalize = state.pop("normalize") @@ -2529,7 +2515,7 @@ cdef class Week(SingleConstructorOffset): return base + off + Timedelta(1, "ns") - Timedelta(1, "D") def is_on_offset(self, dt) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False elif self.weekday is None: return True @@ -2575,7 +2561,7 @@ cdef class WeekOfMonth(WeekOfMonthMixin): """ _prefix = "WOM" - _attributes = frozenset(["n", "normalize", "week", "weekday"]) + _attributes = tuple(["n", "normalize", "week", "weekday"]) def __init__(self, n=1, normalize=False, week=0, weekday=0): WeekOfMonthMixin.__init__(self, n, normalize, weekday) @@ -2590,9 
+2576,6 @@ cdef class WeekOfMonth(WeekOfMonthMixin): self.weekday = state.pop("weekday") self.week = state.pop("week") - def __reduce__(self): - return type(self), (self.n, self.normalize, self.week, self.weekday) - def _get_offset_day(self, other: datetime) -> int: """ Find the day in the same month as other that has the same @@ -2643,7 +2626,7 @@ cdef class LastWeekOfMonth(WeekOfMonthMixin): """ _prefix = "LWOM" - _attributes = frozenset(["n", "normalize", "weekday"]) + _attributes = tuple(["n", "normalize", "weekday"]) def __init__(self, n=1, normalize=False, weekday=0): WeekOfMonthMixin.__init__(self, n, normalize, weekday) @@ -2652,9 +2635,6 @@ cdef class LastWeekOfMonth(WeekOfMonthMixin): if self.n == 0: raise ValueError("N cannot be 0") - def __reduce__(self): - return type(self), (self.n, self.normalize, self.weekday) - cpdef __setstate__(self, state): self.n = state.pop("n") self.normalize = state.pop("normalize") @@ -2793,14 +2773,10 @@ cdef class FY5253(FY5253Mixin): """ _prefix = "RE" - _attributes = frozenset(["weekday", "startingMonth", "variation"]) - - def __reduce__(self): - tup = (self.n, self.normalize, self.weekday, self.startingMonth, self.variation) - return type(self), tup + _attributes = tuple(["n", "normalize", "weekday", "startingMonth", "variation"]) def is_on_offset(self, dt: datetime) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False dt = datetime(dt.year, dt.month, dt.day) year_end = self.get_year_end(dt) @@ -2975,8 +2951,15 @@ cdef class FY5253Quarter(FY5253Mixin): """ _prefix = "REQ" - _attributes = frozenset( - ["weekday", "startingMonth", "qtr_with_extra_week", "variation"] + _attributes = tuple( + [ + "n", + "normalize", + "weekday", + "startingMonth", + "qtr_with_extra_week", + "variation", + ] ) cdef readonly: @@ -3000,17 +2983,6 @@ cdef class FY5253Quarter(FY5253Mixin): FY5253Mixin.__setstate__(self, state) self.qtr_with_extra_week = 
state.pop("qtr_with_extra_week") - def __reduce__(self): - tup = ( - self.n, - self.normalize, - self.weekday, - self.startingMonth, - self.qtr_with_extra_week, - self.variation, - ) - return type(self), tup - @cache_readonly def _offset(self): return FY5253( @@ -3122,7 +3094,7 @@ cdef class FY5253Quarter(FY5253Mixin): return weeks_in_year == 53 def is_on_offset(self, dt: datetime) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False if self._offset.is_on_offset(dt): return True @@ -3158,9 +3130,6 @@ cdef class Easter(SingleConstructorOffset): Right now uses the revised method which is valid in years 1583-4099. """ - def __reduce__(self): - return type(self), (self.n, self.normalize) - cpdef __setstate__(self, state): self.n = state.pop("n") self.normalize = state.pop("normalize") @@ -3195,7 +3164,7 @@ cdef class Easter(SingleConstructorOffset): return new def is_on_offset(self, dt: datetime) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False return date(dt.year, dt.month, dt.day) == easter(dt.year) @@ -3223,16 +3192,10 @@ cdef class CustomBusinessDay(BusinessDay): """ _prefix = "C" - _attributes = frozenset( + _attributes = tuple( ["n", "normalize", "weekmask", "holidays", "calendar", "offset"] ) - def __reduce__(self): - # np.holidaycalendar cant be pickled, so pass None there and - # it will be re-constructed within __init__ - tup = (self.n, self.normalize, self.weekmask, self.holidays, None, self.offset) - return type(self), tup - def __init__( self, n=1, @@ -3280,13 +3243,13 @@ cdef class CustomBusinessDay(BusinessDay): "datetime, datetime64 or timedelta." 
) - def apply_index(self, i): + def apply_index(self, dtindex): raise NotImplementedError def is_on_offset(self, dt: datetime) -> bool: - if self.normalize and not is_normalized(dt): + if self.normalize and not _is_normalized(dt): return False - day64 = to_dt64D(dt) + day64 = _to_dt64D(dt) return np.is_busday(day64, busdaycal=self.calendar) @@ -3297,7 +3260,7 @@ cdef class CustomBusinessHour(BusinessHour): _prefix = "CBH" _anchor = 0 - _attributes = frozenset( + _attributes = tuple( ["n", "normalize", "weekmask", "holidays", "calendar", "start", "end", "offset"] ) @@ -3315,22 +3278,6 @@ cdef class CustomBusinessHour(BusinessHour): BusinessHour.__init__(self, n, normalize, start=start, end=end, offset=offset) self._init_custom(weekmask, holidays, calendar) - def __reduce__(self): - # None for self.calendar bc np.busdaycalendar doesnt pickle nicely - return ( - type(self), - ( - self.n, - self.normalize, - self.weekmask, - self.holidays, - None, - self.start, - self.end, - self.offset, - ), - ) - cdef class _CustomBusinessMonth(BusinessMixin): """ @@ -3355,7 +3302,7 @@ cdef class _CustomBusinessMonth(BusinessMixin): Time offset to apply. 
""" - _attributes = frozenset( + _attributes = tuple( ["n", "normalize", "weekmask", "holidays", "calendar", "offset"] ) @@ -3371,13 +3318,6 @@ cdef class _CustomBusinessMonth(BusinessMixin): BusinessMixin.__init__(self, n, normalize, offset) self._init_custom(weekmask, holidays, calendar) - def __reduce__(self): - # None for self.calendar bc np.busdaycalendar doesnt pickle nicely - return ( - type(self), - (self.n, self.normalize, self.weekmask, self.holidays, None, self.offset), - ) - @cache_readonly def cbday_roll(self): """ diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 9d191ba8e6681..fb888bcba1608 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1509,8 +1509,6 @@ cdef class _Period: int64_t ordinal object freq - _typ = 'period' - def __cinit__(self, ordinal, freq): self.ordinal = ordinal self.freq = freq
privatize/cdef functions that are now only used in liboffsets
https://api.github.com/repos/pandas-dev/pandas/pulls/34446
2020-05-28T23:40:59Z
2020-05-29T12:24:23Z
2020-05-29T12:24:23Z
2020-05-29T14:45:18Z
CLN: simplify to_offset
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 30a1490fdf862..855fc014bfe7e 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -3532,16 +3532,6 @@ prefix_mapping = { ] } -_name_to_offset_map = { - "days": Day(1), - "hours": Hour(1), - "minutes": Minute(1), - "seconds": Second(1), - "milliseconds": Milli(1), - "microseconds": Micro(1), - "nanoseconds": Nano(1), -} - # hack to handle WOM-1MON opattern = re.compile( r"([+\-]?\d*|[+\-]?\d*\.\d*)\s*([A-Za-z]+([\-][\dA-Za-z\-]+)?)" @@ -3695,26 +3685,12 @@ cpdef to_offset(freq): delta = _get_offset(name) * stride elif isinstance(freq, timedelta): - from .timedeltas import Timedelta - - delta = None - freq = Timedelta(freq) - try: - for name in freq.components._fields: - offset = _name_to_offset_map[name] - stride = getattr(freq.components, name) - if stride != 0: - offset = stride * offset - if delta is None: - delta = offset - else: - delta = delta + offset - except ValueError as err: - raise ValueError(INVALID_FREQ_ERR_MSG.format(freq)) from err + return delta_to_tick(freq) - else: + elif isinstance(freq, str): delta = None stride_sign = None + try: split = re.split(opattern, freq) if split[-1] != "" and not split[-1].isspace(): @@ -3744,6 +3720,8 @@ cpdef to_offset(freq): delta = delta + offset except (ValueError, TypeError) as err: raise ValueError(INVALID_FREQ_ERR_MSG.format(freq)) from err + else: + delta = None if delta is None: raise ValueError(INVALID_FREQ_ERR_MSG.format(freq)) diff --git a/pandas/tests/tseries/frequencies/test_to_offset.py b/pandas/tests/tseries/frequencies/test_to_offset.py index beaefe9109e91..d3510eaa5c749 100644 --- a/pandas/tests/tseries/frequencies/test_to_offset.py +++ b/pandas/tests/tseries/frequencies/test_to_offset.py @@ -137,6 +137,7 @@ def test_to_offset_leading_plus(freqstr, expected): (dict(hours=1), offsets.Hour(1)), (dict(hours=1), frequencies.to_offset("60min")), (dict(microseconds=1), offsets.Micro(1)), + 
(dict(microseconds=0), offsets.Nano(0)), ], ) def test_to_offset_pd_timedelta(kwargs, expected): @@ -146,15 +147,6 @@ def test_to_offset_pd_timedelta(kwargs, expected): assert result == expected -def test_to_offset_pd_timedelta_invalid(): - # see gh-9064 - msg = "Invalid frequency: 0 days 00:00:00" - td = Timedelta(microseconds=0) - - with pytest.raises(ValueError, match=msg): - frequencies.to_offset(td) - - @pytest.mark.parametrize( "shortcut,expected", [
https://api.github.com/repos/pandas-dev/pandas/pulls/34444
2020-05-28T22:33:51Z
2020-05-29T00:58:45Z
2020-05-29T00:58:45Z
2020-05-29T01:09:34Z
TST #28981 comparison operation for interval dtypes
diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py index 66526204a6208..50b5fe8e6f6b9 100644 --- a/pandas/tests/arithmetic/test_interval.py +++ b/pandas/tests/arithmetic/test_interval.py @@ -284,3 +284,11 @@ def test_index_series_compat(self, op, constructor, expected_type, assert_func): result = op(index, other) expected = expected_type(self.elementwise_comparison(op, index, other)) assert_func(result, expected) + + @pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None]) + def test_comparison_operations(self, scalars): + # GH #28981 + expected = Series([False, False]) + s = pd.Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval") + result = s == scalars + tm.assert_series_equal(result, expected)
As a complement of #28980, one test was modified to take into account this issue: - [ x ] closes #28981 - [ 1 ] test modified - [ x ] passes `black pandas` - [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/34443
2020-05-28T21:41:00Z
2020-06-03T18:04:41Z
2020-06-03T18:04:40Z
2020-06-07T09:03:01Z
CLN: clearer lookups for period accessors
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index ec6f8de159dae..e722ca6f5a56c 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1331,15 +1331,15 @@ cdef int pdays_in_month(int64_t ordinal, int freq): @cython.wraparound(False) @cython.boundscheck(False) -def get_period_field_arr(int code, const int64_t[:] arr, int freq): +def get_period_field_arr(str field, const int64_t[:] arr, int freq): cdef: Py_ssize_t i, sz int64_t[:] out accessor f - func = _get_accessor_func(code) + func = _get_accessor_func(field) if func is NULL: - raise ValueError(f"Unrecognized period code: {code}") + raise ValueError(f"Unrecognized field name: {field}") sz = len(arr) out = np.empty(sz, dtype=np.int64) @@ -1353,30 +1353,30 @@ def get_period_field_arr(int code, const int64_t[:] arr, int freq): return out.base # .base to access underlying np.ndarray -cdef accessor _get_accessor_func(int code): - if code == 0: +cdef accessor _get_accessor_func(str field): + if field == "year": return <accessor>pyear - elif code == 1: + elif field == "qyear": return <accessor>pqyear - elif code == 2: + elif field == "quarter": return <accessor>pquarter - elif code == 3: + elif field == "month": return <accessor>pmonth - elif code == 4: + elif field == "day": return <accessor>pday - elif code == 5: + elif field == "hour": return <accessor>phour - elif code == 6: + elif field == "minute": return <accessor>pminute - elif code == 7: + elif field == "second": return <accessor>psecond - elif code == 8: + elif field == "week": return <accessor>pweek - elif code == 9: + elif field == "day_of_year": return <accessor>pday_of_year - elif code == 10: + elif field == "weekday": return <accessor>pweekday - elif code == 11: + elif field == "days_in_month": return <accessor>pdays_in_month return NULL @@ -1429,12 +1429,8 @@ def extract_freq(ndarray[object] values): for i in range(n): value = values[i] - try: - # now Timestamp / NaT has freq attr - if 
is_period_object(value): - return value.freq - except AttributeError: - pass + if is_period_object(value): + return value.freq raise ValueError('freq not specified and cannot be inferred') diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 8b2925b2c0827..4601e7fa5389e 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -49,10 +49,10 @@ from pandas.tseries.offsets import DateOffset -def _field_accessor(name: str, alias: int, docstring=None): +def _field_accessor(name: str, docstring=None): def f(self): - base, mult = libfrequencies.get_freq_code(self.freq) - result = get_period_field_arr(alias, self.asi8, base) + base, _ = libfrequencies.get_freq_code(self.freq) + result = get_period_field_arr(name, self.asi8, base) return result f.__name__ = name @@ -324,80 +324,69 @@ def __arrow_array__(self, type=None): year = _field_accessor( "year", - 0, """ The year of the period. """, ) month = _field_accessor( "month", - 3, """ The month as January=1, December=12. """, ) day = _field_accessor( "day", - 4, """ The days of the period. """, ) hour = _field_accessor( "hour", - 5, """ The hour of the period. """, ) minute = _field_accessor( "minute", - 6, """ The minute of the period. """, ) second = _field_accessor( "second", - 7, """ The second of the period. """, ) weekofyear = _field_accessor( "week", - 8, """ The week ordinal of the year. """, ) week = weekofyear dayofweek = _field_accessor( - "dayofweek", - 10, + "weekday", """ The day of the week with Monday=0, Sunday=6. """, ) weekday = dayofweek dayofyear = day_of_year = _field_accessor( - "dayofyear", - 9, + "day_of_year", """ The ordinal day of the year. """, ) quarter = _field_accessor( "quarter", - 2, """ The quarter of the date. """, ) - qyear = _field_accessor("qyear", 1) + qyear = _field_accessor("qyear") days_in_month = _field_accessor( "days_in_month", - 11, """ The number of days in the month. """,
https://api.github.com/repos/pandas-dev/pandas/pulls/34442
2020-05-28T21:20:28Z
2020-05-28T22:19:37Z
2020-05-28T22:19:36Z
2020-05-28T22:20:24Z
Add date dtype
diff --git a/pandas/core/api.py b/pandas/core/api.py index b0b65f9d0be34..52b09355ab51f 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -8,6 +8,7 @@ DatetimeTZDtype, IntervalDtype, PeriodDtype, + DateDtype ) from pandas.core.dtypes.missing import isna, isnull, notna, notnull diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py index 1d538824e6d82..bda19d552081c 100644 --- a/pandas/core/arrays/__init__.py +++ b/pandas/core/arrays/__init__.py @@ -13,6 +13,7 @@ from pandas.core.arrays.sparse import SparseArray from pandas.core.arrays.string_ import StringArray from pandas.core.arrays.timedeltas import TimedeltaArray +from pandas.core.arrays.dates import DateArray __all__ = [ "ExtensionArray", @@ -31,4 +32,5 @@ "SparseArray", "StringArray", "TimedeltaArray", + "DateArray" ] diff --git a/pandas/core/arrays/dates.py b/pandas/core/arrays/dates.py new file mode 100644 index 0000000000000..dadf3e269c443 --- /dev/null +++ b/pandas/core/arrays/dates.py @@ -0,0 +1,184 @@ +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.arrays.datetimelike import DatelikeOps, DatetimeLikeArrayMixin +from pandas.core.arrays.datetimes import sequence_to_dt64ns +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_datetime64_dtype, + is_object_dtype, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass +from pandas.core.dtypes.dtypes import DateDtype +from pandas.core.construction import array +from pandas._libs.tslibs import Timestamp +from pandas._libs.tslibs.conversion import DT64NS_DTYPE +from pandas._libs import tslib, lib +from pandas.core.arrays._mixins import _T + +import numpy as np + +D_DATETIME_DTYPE = "datetime64[D]" +INTEGER_BACKEND = "i8" +VALID_TYPES = {INTEGER_BACKEND, "datetime64[ns]", D_DATETIME_DTYPE, "object"} + + +def _to_date_values(values, copy=False): + data, _, _ = sequence_to_dt64ns(values, copy=copy) + return data.astype(D_DATETIME_DTYPE) + + +class 
DateArray(DatetimeLikeArrayMixin, DatelikeOps): + """ + Pandas ExtensionArray for date (year, month, day only) data. + + Parameters + ---------- + values : Series, Index, DateArray, ndarray + The date data. + copy : bool, default False + Whether to copy the underlying array of values. + + Attributes + ---------- + None + + Methods + ------- + None + """ + + freq = "D" + + def __init__(self, values, copy=False): + if isinstance(values, (ABCSeries, ABCIndexClass)): + values = values._values + + if isinstance(values, type(self)): + values = values._data + + if not isinstance(values, np.ndarray): + msg = ( + f"Unexpected type '{type(values).__name__}'. 'values' must be " + "a DateArray ndarray, or Series or Index containing one of" + " those." + ) + raise ValueError(msg) + + if not self._is_compatible_dtype(values.dtype): + msg = ( + f"The dtype of 'values' is incorrect. Must be one of {VALID_TYPES}." + f" Got {values.dtype} instead." + ) + raise ValueError(msg) + + if values.dtype == INTEGER_BACKEND: + values = values.view(D_DATETIME_DTYPE) + elif values.dtype != "datetime64[D]": + values = _to_date_values(values, copy) + + if copy: + values = values.copy() + + self._data = values + + @staticmethod + def _is_compatible_dtype(dtype): + return ( + is_integer_dtype(dtype) + or is_object_dtype(dtype) + or is_datetime64_dtype(dtype) + or dtype == "datetime64[D]" + ) + + @classmethod + def _simple_new(cls, values, **kwargs): + assert isinstance(values, np.ndarray) + if values.dtype == INTEGER_BACKEND: + values = values.view(D_DATETIME_DTYPE) + + result = object.__new__(cls) + result._data = values + return result + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + """ + Construct a new ExtensionArray from a sequence of scalars. + + Parameters + ---------- + scalars : Sequence + Each element will be an instance of the scalar type for this + array, ``cls.dtype.type``. + dtype : dtype, optional + Construct for this particular dtype. 
This should be a Dtype + compatible with the ExtensionArray. + copy : bool, default False + If True, copy the underlying data. + + Returns + ------- + DateArray + """ + if ( + isinstance(scalars, np.ndarray) + and lib.infer_dtype(scalars, skipna=True) == "integer" + ): + values = scalars.astype(INTEGER_BACKEND) + elif is_integer_dtype(scalars): + values = scalars._data + else: + values = _to_date_values(scalars, copy) + return cls._simple_new(values) + + def _from_backing_data(self: _T, arr: np.ndarray) -> _T: + return type(self)(arr) + + @property + def dtype(self) -> ExtensionDtype: + return DateDtype() + + def __iter__(self): + for date_data in self._data: + yield date_data + + @property + def _box_func(self): + # TODO Implement Datestamp of a similar form in cython + return lambda x: Timestamp(x, freq="D", tz="utc") + + @property + def asi8(self) -> np.ndarray: + return self._data.view(INTEGER_BACKEND) + + @property + def as_datetime_i8(self) -> np.ndarray: + return self._data.astype(DT64NS_DTYPE).view(INTEGER_BACKEND) + + @property + def date(self): + timestamps = self.as_datetime_i8 + return tslib.ints_to_pydatetime(timestamps, box="date") + + def astype(self, dtype, copy=True): + dtype = pandas_dtype(dtype) + if isinstance(dtype, type(self.dtype)): + if copy: + return self.copy() + return self + if is_datetime64_dtype(dtype): + return array(self._data, dtype=DT64NS_DTYPE) + if is_object_dtype(dtype): + return self._box_values(self.as_datetime_i8) + if is_string_dtype(dtype): + return array(self._format_native_types()) + return super().astype(dtype, copy) + + def _format_native_types(self, na_rep="NaT", date_format=None): + return tslib.format_array_from_datetime( + self.as_datetime_i8, tz="utc", format="%Y-%m-%d", na_rep=na_rep + ) + + def __len__(self): + return len(self._data) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 90088c370697e..6c8cd753854dc 100644 --- a/pandas/core/arrays/datetimes.py +++ 
b/pandas/core/arrays/datetimes.py @@ -29,6 +29,7 @@ is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype, + is_date_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, @@ -601,6 +602,8 @@ def astype(self, dtype, copy=True): return self elif is_period_dtype(dtype): return self.to_period(freq=dtype.freq) + elif is_date_dtype(dtype): + return dtype.construct_array_type()._from_sequence(self._data, copy) return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy) # ----------------------------------------------------------------- diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 59954f548fd33..7f99bfee7b9fc 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -14,6 +14,7 @@ from pandas.core.dtypes.common import ( is_bool_dtype, is_datetime64_dtype, + is_date_dtype, is_float, is_float_dtype, is_integer, @@ -453,6 +454,8 @@ def astype(self, dtype, copy: bool = True) -> ArrayLike: elif isinstance(dtype, BooleanDtype): result = self._data.astype("bool", copy=False) return BooleanArray(result, mask=self._mask, copy=False) + elif is_date_dtype(dtype): + return dtype.construct_array_type()._from_sequence(self._data, copy=False) # coerce if is_float_dtype(dtype): diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 5b20b8e1b3be5..6cfab09deba91 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -14,6 +14,7 @@ from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, + DateDtype, ExtensionDtype, IntervalDtype, PeriodDtype, @@ -419,6 +420,38 @@ def is_datetime64tz_dtype(arr_or_dtype) -> bool: return DatetimeTZDtype.is_dtype(arr_or_dtype) +def is_date_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the date dtype. + + Parameters + ---------- + arr_or_dtype : array-like + The array-like or dtype to check. 
+ + Returns + ------- + boolean + Whether or not the array-like or dtype is of the date dtype. + + Examples + -------- + >>> is_date_dtype(object) + False + >>> is_date_dtype(np.datetime64) + False + >>> is_date_dtype(pd.Date64Dtype()) + True + >>> is_date_dtype([1, 2, 3]) + False + >>> is_date_dtype(pd.Series([], dtype="date")) + True + >>> is_date_dtype('0 days') + False + """ + return DateDtype.is_dtype(arr_or_dtype) + + def is_timedelta64_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the timedelta64 dtype. diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 8aa146d613dc3..4c906b30b8a65 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -3,6 +3,7 @@ """ import re +import datetime from typing import ( TYPE_CHECKING, Any, @@ -34,6 +35,7 @@ IntervalArray, PeriodArray, DatetimeArray, + DateArray, ) from pandas import Categorical # noqa: F401 @@ -1232,3 +1234,88 @@ def __from_arrow__( results.append(iarr) return IntervalArray._concat_same_type(results) + + +@register_extension_dtype +class DateDtype(PandasExtensionDtype): + """ + An ExtensionDtype to hold a single date. + The attributes name & type are set when subclasses are created. 
+ """ + + _date_aliases = {"date", "date64"} + _unit = "D" + _numpy_dtype = np.dtype("datetime64[D]") + + def __str__(self): + return "date" + + @property + def name(self) -> str_type: + return str(self) + + @property + def type(self): + return datetime.date + + @property + def na_value(self): + return NaT + + def __repr__(self): + return type(self) + + @property + def kind(self): + return self._numpy_dtype.kind + + @property + def itemsize(self): + """ Return the number of bytes in this dtype """ + return self.numpy_dtype.itemsize + + @classmethod + def construct_from_string(cls, string: str): + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + + if string in cls._date_aliases: + return cls() + + msg = ( + f"Cannot construct a 'DateDtype' from '{string}'.\n\n" + "Incorrectly formatted string passed to constructor. " + "Valid formats include only date" + ) + raise TypeError(msg) + + @classmethod + def construct_array_type(cls): + """ + Return the array type associated with this dtype. 
+ Returns + ------- + type + """ + from pandas.core.arrays import DateArray + + return DateArray + + # TODO make from arrow + + @classmethod + def is_dtype(cls, dtype) -> bool: + if isinstance(dtype, str): + if dtype.lower().startswith("date"): + try: + if cls.construct_from_string(dtype) is not None: + return True + else: + return False + except (ValueError, TypeError): + return False + else: + return False + return super().is_dtype(dtype) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c052c6c9d7d1d..01db258131f54 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -33,6 +33,7 @@ is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, + is_date_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, @@ -67,6 +68,7 @@ from pandas.core.arrays import ( Categorical, DatetimeArray, + DateArray, ExtensionArray, PandasArray, PandasDtype, @@ -97,6 +99,7 @@ class Block(PandasObject): is_float = False is_integer = False is_complex = False + is_date = False is_datetime = False is_datetimetz = False is_timedelta = False @@ -2034,8 +2037,16 @@ def shift(self, periods, axis=0, fill_value=None): new_values = values.shift(periods, fill_value=fill_value, axis=axis) return self.make_block_same_class(new_values) +class DateBlock(Block): + __slots__ = () + is_date = True + is_extension = True -class DatetimeBlock(DatetimeLikeBlockMixin, Block): + @property + def _holder(self): + return DateArray + +class DatetimeBlock(Block): __slots__ = () is_datetime = True @@ -2080,7 +2091,7 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"): dtype = pandas_dtype(dtype) # if we are passed a datetime64[ns, tz] - if is_datetime64tz_dtype(dtype): + if is_datetime64tz_dtype(dtype) and not is_date_dtype(dtype): values = self.values if copy: # this should be the only copy @@ -2676,6 +2687,8 @@ def get_block_type(values, dtype=None): elif issubclass(vtype, np.datetime64): assert not 
is_datetime64tz_dtype(values.dtype) cls = DatetimeBlock + elif is_date_dtype(values.dtype): + cls = DateBlock elif is_datetime64tz_dtype(values.dtype): cls = DatetimeTZBlock elif is_interval_dtype(dtype) or is_period_dtype(dtype): @@ -2713,12 +2726,10 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None): if klass is None: dtype = dtype or values.dtype klass = get_block_type(values, dtype) - elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values.dtype): # TODO: This is no longer hit internally; does it need to be retained # for e.g. pyarrow? values = DatetimeArray._simple_new(values, dtype=dtype) - return klass(values, ndim=ndim, placement=placement) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 02339f4344d4d..307fda9b86004 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -46,6 +46,7 @@ is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, + is_date_dtype, is_extension_array_dtype, is_float, is_float_dtype, @@ -60,6 +61,7 @@ from pandas.core.arrays.datetimes import DatetimeArray from pandas.core.arrays.timedeltas import TimedeltaArray +from pandas.core.arrays.dates import DateArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction import extract_array @@ -1120,6 +1122,8 @@ def format_array( fmt_klass = Datetime64TZFormatter elif is_timedelta64_dtype(values.dtype): fmt_klass = Timedelta64Formatter + elif is_date_dtype(values.dtype): + fmt_klass = DateFormatter elif is_extension_array_dtype(values.dtype): fmt_klass = ExtensionArrayFormatter elif is_float_dtype(values.dtype) or is_complex_dtype(values.dtype): @@ -1447,6 +1451,26 @@ def _format_strings(self) -> List[str]: return fmt_values.tolist() +class DateFormatter(GenericArrayFormatter): + def __init__( + self, + values: Union[np.ndarray, "Series", DateArray], + nat_rep: str = "NaT", + date_format: None = None, + **kwargs, + ): + super().__init__(values, **kwargs) 
+ self.nat_rep = nat_rep + self.date_format = date_format + + def _format_strings(self) -> List[str]: + values = self.values + if self.formatter is not None and callable(self.formatter): + return [self.formatter(x) for x in values] + + return values._data.astype("str") + + class ExtensionArrayFormatter(GenericArrayFormatter): def _format_strings(self) -> List[str]: values = extract_array(self.values, extract_numpy=True) @@ -1605,7 +1629,8 @@ def _get_format_datetime64( def _get_format_datetime64_from_values( - values: Union[np.ndarray, DatetimeArray, DatetimeIndex], date_format: Optional[str] + values: Union[np.ndarray, DatetimeArray, DatetimeIndex, DateArray], + date_format: Optional[str], ) -> Optional[str]: """ given values and a date_format, return a string format """ if isinstance(values, np.ndarray) and values.ndim > 1: diff --git a/pandas/tests/arrays/test_dates.py b/pandas/tests/arrays/test_dates.py new file mode 100644 index 0000000000000..23ea08cde0e3a --- /dev/null +++ b/pandas/tests/arrays/test_dates.py @@ -0,0 +1,184 @@ +from pandas import Series +import pandas as pd +import numpy as np +from pandas.core.arrays.dates import DateArray +from pandas.core.dtypes.common import is_integer_dtype, is_extension_array_dtype, pandas_dtype +import pytest +import pandas._testing as tm + +DATETIME_STRINGS = [ + "2001-01-01T12:00", + "2002-02-03T13:56:03.172", + "2007-07-13", + "2006-01-13", + "2010-08-13", +] +DATE_TEST_ARRAYS = [ + pd.array(np.arange(5, dtype=np.int64)), + pd.array(pd.date_range("1970-01-01", periods=5, freq="D")), + DateArray(np.arange(5, dtype=np.int64)), + ] +VALID_CONVERSION_TYPES = ["object", "string", "i8", "datetime64[ns]"] +SECONDS_IN_A_DAY = 86400 +NANO_SECONDS_IN_A_SECOND = 10 ** 9 +NANO_SECONDS_IN_A_DAY = SECONDS_IN_A_DAY * NANO_SECONDS_IN_A_SECOND + + +@pytest.mark.parametrize("type", VALID_CONVERSION_TYPES) +def test_init_date_array_from_numpy(type: str): + dt_range = pd.date_range("1970-01-01", periods=5, freq="D") + + 
date_range_as_type = dt_range.astype(type).to_numpy() + if type == "i8": + date_range_as_type //= NANO_SECONDS_IN_A_DAY + arr = DateArray(date_range_as_type) + tm.assert_numpy_array_equal(dt_range.date, arr.date) + + +@pytest.mark.parametrize( + "arr", + [ + pd.array(np.arange(5, dtype=np.int64)), + pd.array(np.arange(5, dtype=np.object)), + pd.date_range("1970-01-01", periods=5, freq="D").astype("object"), + pd.array(np.array(DATETIME_STRINGS, dtype="datetime64")), + # pd.array(np.array(DATETIME_STRINGS, dtype="object"), dtype="string"), + ], +) +def test_date_from_pandas_array(arr): + result = DateArray._from_sequence(arr) + if is_integer_dtype(arr): + arr *= NANO_SECONDS_IN_A_DAY + tm.assert_numpy_array_equal( + pd.array(arr.astype("datetime64[ns]")).date, result.date + ) + + +@pytest.fixture +def date_array(): + return DateArray._from_sequence(pd.array(np.arange(5, dtype=np.int64))) + + +def test_date_array_to_int(date_array): + tm.assert_numpy_array_equal(date_array.astype("i8"), np.arange(5, dtype=np.int64)) + + +def test_date_array_to_datetime64(date_array): + tm.assert_numpy_array_equal( + date_array.astype("datetime64[ns]").date, + pd.date_range("1970-01-01", periods=5, freq="D").astype("datetime64[ns]").date, + ) + + +def test_date_array_to_str(date_array): + string_dates = pd.array( + np.array(["1970-01-0%d" % x for x in range(1, 6)]), dtype="string" + ) + tm.assert_extension_array_equal(date_array.astype("string"), string_dates) + +def test_series_has_extension_array(): + date_series = Series(DateArray(np.arange(5, dtype=np.int64))) + assert is_extension_array_dtype(date_series.values) + + +@pytest.mark.parametrize( + "arr", + DATE_TEST_ARRAYS, +) +def test_other_type_to_date(arr): + date_array = DateArray(np.arange(5, dtype=np.int64)) + other_arr_to_date = arr.astype("date") + tm.assert_numpy_array_equal(date_array._data, other_arr_to_date._data) + +@pytest.mark.parametrize( + "arr", + DATE_TEST_ARRAYS, +) +def test_other_type_to_date_series(arr): + 
date_series = Series(DateArray(np.arange(5, dtype=np.int64))) + other_series = Series(arr).astype("date") + tm.assert_series_equal(date_series, other_series) + +@pytest.mark.parametrize( + "type", + VALID_CONVERSION_TYPES, +) +def test_date_series_to_other_type_series(type): + date_series = Series(DateArray(np.arange(5, dtype=np.int64))) + converted = date_series.astype(type) + print(converted) + print(converted.dtype) + print(pandas_dtype(type)) + assert converted.dtype == pandas_dtype(type) + +@pytest.fixture +def series(): + series = Series(["2019-01-01", "2020-12-11", "2020-10-11 12:11:12"]) + series.name = "strings" + return series + + +@pytest.fixture +def df(series: Series): + df = series.to_frame() + df["strings"] = df["strings"].astype("string") + return df + +def test_set_series_in_df(): + df["test"] = Series([1, 2, 3, 4]) + print(df) + date_series = series.astype("datetime64").astype("date").copy() + date_series.name = "dates" + df["dates"] = Series(DateArray(np.arange(5, dtype=np.int64))) + +def test_date_display_format(df: pd.DataFrame, series: Series): + display = str(Series(DateArray(np.arange(5, dtype=np.int64)))) + print(display) + expected = ( + "0 2019-01-01\n" + "1 2020-12-11\n" + "2 2020-10-11\n" + "Name: dates, dtype: date" + ) + assert display == expected + + +def test_non_array_raises(): + with pytest.raises(ValueError, match="list"): + DateArray([1, 2, 3]) + + +def test_other_type_raises(): + with pytest.raises(ValueError, match="The dtype of 'values' is incorrect.*bool"): + DateArray(np.array([1, 2, 3], dtype="bool")) + + +def test_copy(): + data = np.array([1, 2, 3], dtype="datetime64[D]") + arr = DateArray(data, copy=False) + assert arr._data is data + + arr = DateArray(data, copy=True) + assert arr._data is not data + + +@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"]) +def test_astype_int(dtype): + arr = DateArray._from_sequence([pd.Timestamp("2000"), pd.Timestamp("2001")]) + result = arr.astype(dtype) + 
+ if np.dtype(dtype).kind == "u": + expected_dtype = np.dtype("uint64") + else: + expected_dtype = np.dtype("int64") + expected = arr.astype(expected_dtype) + + assert result.dtype == expected_dtype + tm.assert_numpy_array_equal(result, expected) + +if __name__ == '__main__': + series = Series(["2019-01-01", "2020-12-11", "2020-10-11 12:11:12"]) + series.name = "strings" + df = series.to_frame() + df["strings"] = df["strings"].astype("string") + test_dtype_name_display(df, series) \ No newline at end of file
- [ ] closes #32473 This is the beginning of the date data type, and it definitely works properly on a high level. For some of the places where strings might need to be converted and such I used the cython code that was implemented in tslib. The time complexity is still linear, but some of those methods may need to be rewritten for dates in cython, which I'm happy to do.
https://api.github.com/repos/pandas-dev/pandas/pulls/34441
2020-05-28T20:55:54Z
2020-12-29T20:41:26Z
null
2020-12-29T20:41:26Z
TST #24444 converting Period to Timestamp
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 807cfbd524d92..37a882d91dc78 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -3562,7 +3562,7 @@ _lite_rule_alias = { "Min": "T", "min": "T", - "ms": "L", + "ms": "U", "us": "U", "ns": "N", } diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index e81f2ee55eebd..6f92ac2b5143c 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -632,6 +632,26 @@ def _ex(p): result = p.to_timestamp("5S", how="start") assert result == expected + @pytest.mark.parametrize("day_str", ["2020-12-31", "2019-01-01", "1970-01-01"]) + @pytest.mark.parametrize( + "hour, expected", + [ + ("23:59:59.999", 999000), + ("23:59:59.999999", 999999), + ("23:59:59.000999", 999), + ("23:59:59.004999", 4999), + ("23:59:59.005", 5000), + ("23:59:59.005001", 5001), + ("23:59:59.000001", 1), + ("23:59:59", 0), + ], + ) + @pytest.mark.parametrize("freq", [None, "ms", "ns"]) + def test_to_timestamp_microsecond(self, day_str, hour, expected, freq): + # GH 24444 + result = Period(day_str + " " + hour).to_timestamp(freq=freq).microsecond + assert result == expected + # -------------------------------------------------------------- # Rendering: __repr__, strftime, etc
I've tested intervals `ms`, `s` and `min`. Note: the test raises an error for `ms` on v1.0.3, but is successful on master Note: I did not choose to close this issue has the test fail for `ns` unit [ ] #24444 [ 1 ] tests added / passed [ x ] passes `black pandas` [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/34439
2020-05-28T20:44:49Z
2020-06-07T08:45:06Z
null
2020-06-07T09:03:17Z
BUG: Fix to GH34422 SeriesGroupBy works only with 'func' now
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 5ef1f9dea5091..88524b1f458ff 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -907,6 +907,7 @@ Groupby/resample/rolling to the input DataFrame is inconsistent. An internal heuristic to detect index mutation would behave differently for equal but not identical indices. In particular, the result index shape might change if a copy of the input would be returned. The behaviour now is consistent, independent of internal heuristics. (:issue:`31612`, :issue:`14927`, :issue:`13056`) +- Bug in :meth:`SeriesGroupBy.agg` where any column name was accepted in the named aggregation of ``SeriesGroupBy`` previously. The behaviour now allows only ``str`` and callables else would raise ``TypeError``. (:issue:`34422`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index 6130e05b2a4dc..838722f60b380 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -5,7 +5,7 @@ from collections import defaultdict from functools import partial -from typing import Any, DefaultDict, List, Sequence, Tuple +from typing import Any, Callable, DefaultDict, List, Sequence, Tuple, Union from pandas.core.dtypes.common import is_dict_like, is_list_like @@ -196,3 +196,39 @@ def maybe_mangle_lambdas(agg_spec: Any) -> Any: mangled_aggspec = _managle_lambda_list(agg_spec) return mangled_aggspec + + +def validate_func_kwargs( + kwargs: dict, +) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]: + """ + Validates types of user-provided "named aggregation" kwargs. + `TypeError` is raised if aggfunc is not `str` or callable. + + Parameters + ---------- + kwargs : dict + + Returns + ------- + columns : List[str] + List of user-provied keys. 
+ func : List[Union[str, callable[...,Any]]] + List of user-provided aggfuncs + + Examples + -------- + >>> validate_func_kwargs({'one': 'min', 'two': 'max'}) + (['one', 'two'], ['min', 'max']) + """ + no_arg_message = "Must provide 'func' or named aggregation **kwargs." + tuple_given_message = "func is expected but recieved {} in **kwargs." + columns = list(kwargs) + func = [] + for col_func in kwargs.values(): + if not (isinstance(col_func, str) or callable(col_func)): + raise TypeError(tuple_given_message.format(type(col_func).__name__)) + func.append(col_func) + if not columns: + raise TypeError(no_arg_message) + return columns, func diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index ea4b6f4e65341..d589b0e0fe83c 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -57,6 +57,7 @@ is_multi_agg_with_relabel, maybe_mangle_lambdas, normalize_keyword_aggregation, + validate_func_kwargs, ) import pandas.core.algorithms as algorithms from pandas.core.base import DataError, SpecificationError @@ -233,13 +234,9 @@ def aggregate( relabeling = func is None columns = None - no_arg_message = "Must provide 'func' or named aggregation **kwargs." 
if relabeling: - columns = list(kwargs) - func = [kwargs[col] for col in columns] + columns, func = validate_func_kwargs(kwargs) kwargs = {} - if not columns: - raise TypeError(no_arg_message) if isinstance(func, str): return getattr(self, func)(*args, **kwargs) diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index d4b061594c364..371ec11cdba77 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -511,6 +511,21 @@ def test_mangled(self): expected = pd.DataFrame({"a": [0, 0], "b": [1, 1]}) tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize( + "inp", + [ + pd.NamedAgg(column="anything", aggfunc="min"), + ("anything", "min"), + ["anything", "min"], + ], + ) + def test_named_agg_nametuple(self, inp): + # GH34422 + s = pd.Series([1, 1, 2, 2, 3, 3, 4, 5]) + msg = f"func is expected but recieved {type(inp).__name__}" + with pytest.raises(TypeError, match=msg): + s.groupby(s.values).agg(a=inp) + class TestNamedAggregationDataFrame: def test_agg_relabel(self):
- [X] closes #34422 - [x] tests added / passed - [x] passes `black pandas` - [x] whatsnew entry This PR tries to fix the bug pointed in #34422 where `SeriesGroupBy.agg` works with any given column name in `NamedAgg`. After discussing with @TomAugspurger and @MarcoGorelli in another issue #34380 regarding this issue in the comments, We came to a solution that > So for SeriesGroupBy.agg, if there are any tuples or NamedAgg present in kwargs then I think we should raise. > Disallowing `NamedAgg` and tuples with `SeriesGroupBy`. #### Before fix: ```python3 s = pd.Series([1,1,2,2,3,3,4,5]) s.groupby(s.values).agg(one = pd.NamedAgg(column='anything',aggfunc='sum')) one 1 2 2 4 3 6 4 4 5 5 s.groupby(s.values).agg(one=('something','sum')) one 1 2 2 4 3 6 4 4 5 5 ``` #### After fix: ```python3 s = pd.Series([1,1,2,2,3,3,4,5]) s.groupby(s.values).agg(one = pd.NamedAgg(column='anything',aggfunc='sum')) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "d:\#gh34422\pandas\pandas\core\groupby\generic.py", line 243, in aggregate raise TypeError(tuple_given_message.format(type(kwargs[col]).__name__)) TypeError: 'func' is expected but recieved NamedAgg in **kwargs. s.groupby(s.values).agg(one=('something','sum')) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "d:\#gh34422\pandas\pandas\core\groupby\generic.py", line 243, in aggregate raise TypeError(tuple_given_message.format(type(kwargs[col]).__name__)) TypeError: 'func' is expected but recieved tuple in **kwargs. ```
https://api.github.com/repos/pandas-dev/pandas/pulls/34435
2020-05-28T18:42:25Z
2020-06-03T22:26:16Z
2020-06-03T22:26:15Z
2020-06-05T17:30:50Z
TST: additional regression cases for slicing blockwise op (GH34421)
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 993b644c6993b..e7b7f3e524d44 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1523,7 +1523,16 @@ def test_dataframe_blockwise_slicelike(): df2 = df1.copy() df2.iloc[0, [1, 3, 7]] = np.nan - res = df1 + df2 + df3 = df1.copy() + df3.iloc[0, [5]] = np.nan - expected = pd.DataFrame({i: df1[i] + df2[i] for i in df1.columns}) - tm.assert_frame_equal(res, expected) + df4 = df1.copy() + df4.iloc[0, np.arange(2, 5)] = np.nan + df5 = df1.copy() + df5.iloc[0, np.arange(4, 7)] = np.nan + + for left, right in [(df1, df2), (df2, df3), (df4, df5)]: + res = left + right + + expected = pd.DataFrame({i: left[i] + right[i] for i in left.columns}) + tm.assert_frame_equal(res, expected)
Adding the other case from https://github.com/pandas-dev/pandas/issues/34367 to the test as well (although also fixed by the PR, it was failing with a different error originally, so might be worth it to add as test case as well). cc @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/34434
2020-05-28T18:17:28Z
2020-05-28T22:20:24Z
2020-05-28T22:20:24Z
2020-05-29T06:38:58Z
DOC: reduce API docs for offset aliases
diff --git a/doc/source/reference/offset_frequency.rst b/doc/source/reference/offset_frequency.rst index ee89df3114048..1b63253cde2c5 100644 --- a/doc/source/reference/offset_frequency.rst +++ b/doc/source/reference/offset_frequency.rst @@ -42,11 +42,20 @@ Methods BusinessDay ----------- + .. autosummary:: :toctree: api/ BusinessDay +Alias: + +.. autosummary:: + :toctree: api/ + :template: autosummary/class_without_autosummary.rst + + BDay + Properties ~~~~~~~~~~ .. autosummary:: @@ -117,11 +126,20 @@ Methods CustomBusinessDay ----------------- + .. autosummary:: :toctree: api/ CustomBusinessDay +Alias: + +.. autosummary:: + :toctree: api/ + :template: autosummary/class_without_autosummary.rst + + CDay + Properties ~~~~~~~~~~ .. autosummary:: @@ -260,11 +278,20 @@ Methods BusinessMonthEnd ---------------- + .. autosummary:: :toctree: api/ BusinessMonthEnd +Alias: + +.. autosummary:: + :toctree: api/ + :template: autosummary/class_without_autosummary.rst + + BMonthEnd + Properties ~~~~~~~~~~ .. autosummary:: @@ -294,11 +321,20 @@ Methods BusinessMonthBegin ------------------ + .. autosummary:: :toctree: api/ BusinessMonthBegin +Alias: + +.. autosummary:: + :toctree: api/ + :template: autosummary/class_without_autosummary.rst + + BMonthBegin + Properties ~~~~~~~~~~ .. autosummary:: @@ -328,11 +364,20 @@ Methods CustomBusinessMonthEnd ---------------------- + .. autosummary:: :toctree: api/ CustomBusinessMonthEnd +Alias: + +.. autosummary:: + :toctree: api/ + :template: autosummary/class_without_autosummary.rst + + CBMonthEnd + Properties ~~~~~~~~~~ .. autosummary:: @@ -365,11 +410,20 @@ Methods CustomBusinessMonthBegin ------------------------ + .. autosummary:: :toctree: api/ CustomBusinessMonthBegin +Alias: + +.. autosummary:: + :toctree: api/ + :template: autosummary/class_without_autosummary.rst + + CBMonthBegin + Properties ~~~~~~~~~~ .. autosummary:: @@ -1238,251 +1292,6 @@ Methods Nano.__call__ Nano.apply -BDay ----- -.. 
autosummary:: - :toctree: api/ - - BDay - -Properties -~~~~~~~~~~ -.. autosummary:: - :toctree: api/ - - BDay.base - BDay.freqstr - BDay.kwds - BDay.name - BDay.nanos - BDay.normalize - BDay.offset - BDay.rule_code - BDay.n - BDay.weekmask - BDay.holidays - BDay.calendar - -Methods -~~~~~~~ -.. autosummary:: - :toctree: api/ - - BDay.apply - BDay.apply_index - BDay.copy - BDay.isAnchored - BDay.onOffset - BDay.is_anchored - BDay.is_on_offset - BDay.rollback - BDay.rollforward - BDay.__call__ - -BMonthEnd ---------- -.. autosummary:: - :toctree: api/ - - BMonthEnd - -Properties -~~~~~~~~~~ -.. autosummary:: - :toctree: api/ - - BMonthEnd.base - BMonthEnd.freqstr - BMonthEnd.kwds - BMonthEnd.name - BMonthEnd.nanos - BMonthEnd.normalize - BMonthEnd.rule_code - BMonthEnd.n - -Methods -~~~~~~~ -.. autosummary:: - :toctree: api/ - - BMonthEnd.apply - BMonthEnd.apply_index - BMonthEnd.copy - BMonthEnd.isAnchored - BMonthEnd.onOffset - BMonthEnd.is_anchored - BMonthEnd.is_on_offset - BMonthEnd.rollback - BMonthEnd.rollforward - BMonthEnd.__call__ - -BMonthBegin ------------ -.. autosummary:: - :toctree: api/ - - BMonthBegin - -Properties -~~~~~~~~~~ -.. autosummary:: - :toctree: api/ - - BMonthBegin.base - BMonthBegin.freqstr - BMonthBegin.kwds - BMonthBegin.name - BMonthBegin.nanos - BMonthBegin.normalize - BMonthBegin.rule_code - BMonthBegin.n - -Methods -~~~~~~~ -.. autosummary:: - :toctree: api/ - - BMonthBegin.apply - BMonthBegin.apply_index - BMonthBegin.copy - BMonthBegin.isAnchored - BMonthBegin.onOffset - BMonthBegin.is_anchored - BMonthBegin.is_on_offset - BMonthBegin.rollback - BMonthBegin.rollforward - BMonthBegin.__call__ - -CBMonthEnd ----------- -.. autosummary:: - :toctree: api/ - - CBMonthEnd - -Properties -~~~~~~~~~~ -.. 
autosummary:: - :toctree: api/ - - CBMonthEnd.base - CBMonthEnd.cbday_roll - CBMonthEnd.freqstr - CBMonthEnd.kwds - CBMonthEnd.m_offset - CBMonthEnd.month_roll - CBMonthEnd.name - CBMonthEnd.nanos - CBMonthEnd.normalize - CBMonthEnd.offset - CBMonthEnd.rule_code - CBMonthEnd.n - CBMonthEnd.weekmask - CBMonthEnd.holidays - CBMonthEnd.calendar - -Methods -~~~~~~~ -.. autosummary:: - :toctree: api/ - - CBMonthEnd.apply - CBMonthEnd.apply_index - CBMonthEnd.copy - CBMonthEnd.isAnchored - CBMonthEnd.onOffset - CBMonthEnd.is_anchored - CBMonthEnd.is_on_offset - CBMonthEnd.rollback - CBMonthEnd.rollforward - CBMonthEnd.__call__ - -CBMonthBegin ------------- -.. autosummary:: - :toctree: api/ - - CBMonthBegin - -Properties -~~~~~~~~~~ -.. autosummary:: - :toctree: api/ - - CBMonthBegin.base - CBMonthBegin.cbday_roll - CBMonthBegin.freqstr - CBMonthBegin.kwds - CBMonthBegin.m_offset - CBMonthBegin.month_roll - CBMonthBegin.name - CBMonthBegin.nanos - CBMonthBegin.normalize - CBMonthBegin.offset - CBMonthBegin.rule_code - CBMonthBegin.n - CBMonthBegin.weekmask - CBMonthBegin.holidays - CBMonthBegin.calendar - -Methods -~~~~~~~ -.. autosummary:: - :toctree: api/ - - CBMonthBegin.apply - CBMonthBegin.apply_index - CBMonthBegin.copy - CBMonthBegin.isAnchored - CBMonthBegin.onOffset - CBMonthBegin.is_anchored - CBMonthBegin.is_on_offset - CBMonthBegin.rollback - CBMonthBegin.rollforward - CBMonthBegin.__call__ - -CDay ----- -.. autosummary:: - :toctree: api/ - - CDay - -Properties -~~~~~~~~~~ -.. autosummary:: - :toctree: api/ - - CDay.base - CDay.freqstr - CDay.kwds - CDay.name - CDay.nanos - CDay.normalize - CDay.offset - CDay.rule_code - CDay.n - CDay.weekmask - CDay.calendar - CDay.holidays - -Methods -~~~~~~~ -.. autosummary:: - :toctree: api/ - - CDay.apply - CDay.apply_index - CDay.copy - CDay.isAnchored - CDay.onOffset - CDay.is_anchored - CDay.is_on_offset - CDay.rollback - CDay.rollforward - CDay.__call__ - - .. _api.frequencies: ===========
Test to see if this works with sphinx
https://api.github.com/repos/pandas-dev/pandas/pulls/34433
2020-05-28T17:38:54Z
2020-06-19T11:01:22Z
2020-06-19T11:01:22Z
2020-06-19T11:01:29Z
CLN: assorted tslibs cleanups
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 660cd3af1b35e..baf8889b415fb 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1325,7 +1325,7 @@ cdef class BusinessDay(BusinessMixin): off_str += str(td.microseconds) + "us" return off_str - if isinstance(self.offset, timedelta): + if PyDelta_Check(self.offset): zero = timedelta(0, 0, 0) if self.offset >= zero: off_str = "+" + get_str(self.offset) @@ -1337,7 +1337,7 @@ cdef class BusinessDay(BusinessMixin): @apply_wraps def apply(self, other): - if isinstance(other, datetime): + if PyDateTime_Check(other): n = self.n wday = other.weekday() @@ -1368,7 +1368,7 @@ cdef class BusinessDay(BusinessMixin): result = result + self.offset return result - elif isinstance(other, (timedelta, Tick)): + elif PyDelta_Check(other) or isinstance(other, Tick): return BusinessDay( self.n, offset=self.offset + other, normalize=self.normalize ) @@ -1649,7 +1649,7 @@ cdef class BusinessHour(BusinessMixin): @apply_wraps def apply(self, other): - if isinstance(other, datetime): + if PyDateTime_Check(other): # used for detecting edge condition nanosecond = getattr(other, "nanosecond", 0) # reset timezone and nanosecond @@ -2511,7 +2511,7 @@ cdef class Week(SingleConstructorOffset): if self.weekday is None: return other + self.n * self._inc - if not isinstance(other, datetime): + if not PyDateTime_Check(other): raise TypeError( f"Cannot add {type(other).__name__} to {type(self).__name__}" ) @@ -3305,7 +3305,7 @@ class CustomBusinessDay(CustomMixin, BusinessDay): else: roll = "backward" - if isinstance(other, datetime): + if PyDateTime_Check(other): date_in = other np_dt = np.datetime64(date_in.date()) @@ -3320,7 +3320,7 @@ class CustomBusinessDay(CustomMixin, BusinessDay): result = result + self.offset return result - elif isinstance(other, (timedelta, Tick)): + elif PyDelta_Check(other) or isinstance(other, Tick): return BDay(self.n, offset=self.offset + other, 
normalize=self.normalize) else: raise ApplyTypeError( diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index ec6f8de159dae..9757c4d36d5fa 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -40,7 +40,10 @@ cimport pandas._libs.tslibs.util as util from pandas._libs.tslibs.timestamps import Timestamp from pandas._libs.tslibs.timezones cimport is_utc, is_tzlocal, get_dst_info from pandas._libs.tslibs.timedeltas import Timedelta -from pandas._libs.tslibs.timedeltas cimport delta_to_nanoseconds +from pandas._libs.tslibs.timedeltas cimport ( + delta_to_nanoseconds, + is_any_td_scalar, +) from pandas._libs.tslibs.ccalendar cimport ( dayofweek, @@ -1591,7 +1594,7 @@ cdef class _Period: return NaT return other.__add__(self) - if is_any_tdlike_scalar(other): + if is_any_td_scalar(other): return self._add_delta(other) elif is_offset_object(other): return self._add_offset(other) @@ -1618,7 +1621,7 @@ cdef class _Period: return NaT return NotImplemented - elif is_any_tdlike_scalar(other): + elif is_any_td_scalar(other): neg_other = -other return self + neg_other elif is_offset_object(other): @@ -2494,18 +2497,3 @@ def validate_end_alias(how): if how not in {'S', 'E'}: raise ValueError('How must be one of S or E') return how - - -cpdef is_any_tdlike_scalar(object obj): - """ - Cython equivalent for `isinstance(obj, (timedelta, np.timedelta64, Tick))` - - Parameters - ---------- - obj : object - - Returns - ------- - bool - """ - return util.is_timedelta64_object(obj) or PyDelta_Check(obj) or is_tick_object(obj) diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd index 8f9c1b190b021..95ddf8840e65d 100644 --- a/pandas/_libs/tslibs/timedeltas.pxd +++ b/pandas/_libs/tslibs/timedeltas.pxd @@ -3,3 +3,4 @@ from numpy cimport int64_t # Exposed for tslib, not intended for outside use. cpdef int64_t delta_to_nanoseconds(delta) except? 
-1 cdef convert_to_timedelta64(object ts, object unit) +cdef bint is_any_td_scalar(object obj) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 5bc3694cf8dad..f7f8b86359732 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -446,7 +446,8 @@ cdef inline timedelta_from_spec(object number, object frac, object unit): frac : a list of frac digits unit : a list of unit characters """ - cdef object n + cdef: + str n try: unit = ''.join(unit) @@ -1376,6 +1377,17 @@ class Timedelta(_Timedelta): cdef bint is_any_td_scalar(object obj): + """ + Cython equivalent for `isinstance(obj, (timedelta, np.timedelta64, Tick))` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ return ( PyDelta_Check(obj) or is_timedelta64_object(obj) or is_tick_object(obj) ) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 9fd62e5e25c54..48c4afe7d4c1b 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -808,7 +808,7 @@ class Timestamp(_Timestamp): # check that only ts_input is passed # checking verbosely, because cython doesn't optimize # list comprehensions (as of cython 0.29.x) - if (isinstance(ts_input, Timestamp) and freq is None and + if (isinstance(ts_input, _Timestamp) and freq is None and tz is None and unit is None and year is None and month is None and day is None and hour is None and minute is None and second is None and
https://api.github.com/repos/pandas-dev/pandas/pulls/34432
2020-05-28T15:41:33Z
2020-05-28T20:56:28Z
2020-05-28T20:56:28Z
2020-05-28T21:28:52Z
DOC: cleanup whatsnew post 1.0.4 release
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 5ef1f9dea5091..4f37cf9931075 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -706,9 +706,6 @@ Categorical - Bug when passing categorical data to :class:`Index` constructor along with ``dtype=object`` incorrectly returning a :class:`CategoricalIndex` instead of object-dtype :class:`Index` (:issue:`32167`) - Bug where :class:`Categorical` comparison operator ``__ne__`` would incorrectly evaluate to ``False`` when either element was missing (:issue:`32276`) - :meth:`Categorical.fillna` now accepts :class:`Categorical` ``other`` argument (:issue:`32420`) -- Bug where :meth:`Categorical.replace` would replace with ``NaN`` whenever the new value and replacement value were equal (:issue:`33288`) -- Bug where an ordered :class:`Categorical` containing only ``NaN`` values would raise rather than returning ``NaN`` when taking the minimum or maximum (:issue:`33450`) -- Bug where :meth:`Series.isna` and :meth:`DataFrame.isna` would raise for categorical dtype when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`33594`) Datetimelike ^^^^^^^^^^^^ @@ -759,7 +756,6 @@ Numeric - Bug in :meth:`DataFrame.mean` with ``numeric_only=False`` and either ``datetime64`` dtype or ``PeriodDtype`` column incorrectly raising ``TypeError`` (:issue:`32426`) - Bug in :meth:`DataFrame.count` with ``level="foo"`` and index level ``"foo"`` containing NaNs causes segmentation fault (:issue:`21824`) - Bug in :meth:`DataFrame.diff` with ``axis=1`` returning incorrect results with mixed dtypes (:issue:`32995`) -- Bug in DataFrame reductions using ``numeric_only=True`` and ExtensionArrays (:issue:`33256`). 
- Bug in :meth:`DataFrame.corr` and :meth:`DataFrame.cov` raising when handling nullable integer columns with ``pandas.NA`` (:issue:`33803`) - Bug in :class:`DataFrame` and :class:`Series` addition and subtraction between object-dtype objects and ``datetime64`` dtype objects (:issue:`33824`) @@ -796,7 +792,6 @@ Indexing - Bug in :meth:`DataFrame.iloc.__setitem__` on a :class:`DataFrame` with duplicate columns incorrectly setting values for all matching columns (:issue:`15686`, :issue:`22036`) - Bug in :meth:`DataFrame.loc:` and :meth:`Series.loc` with a :class:`DatetimeIndex`, :class:`TimedeltaIndex`, or :class:`PeriodIndex` incorrectly allowing lookups of non-matching datetime-like dtypes (:issue:`32650`) - Bug in :meth:`Series.__getitem__` indexing with non-standard scalars, e.g. ``np.dtype`` (:issue:`32684`) -- Fix to preserve the ability to index with the "nearest" method with xarray's CFTimeIndex, an :class:`Index` subclass (`pydata/xarray#3751 <https://github.com/pydata/xarray/issues/3751>`_, :issue:`32905`). - Bug in :class:`Index` constructor where an unhelpful error message was raised for ``numpy`` scalars (:issue:`33017`) - Bug in :meth:`DataFrame.lookup` incorrectly raising an ``AttributeError`` when ``frame.index`` or ``frame.columns`` is not unique; this will now raise a ``ValueError`` with a helpful error message (:issue:`33041`) - Bug in :meth:`DataFrame.iloc.__setitem__` creating a new array instead of overwriting ``Categorical`` values in-place (:issue:`32831`) @@ -866,11 +861,7 @@ I/O - Bug in :func:`pandas.io.json.json_normalize` where location specified by `record_path` doesn't point to an array. (:issue:`26284`) - :func:`pandas.read_hdf` has a more explicit error message when loading an unsupported HDF file (:issue:`9539`) -- Bug in :meth:`~DataFrame.to_parquet` was not raising ``PermissionError`` when writing to a private s3 bucket with invalid creds. 
(:issue:`27679`) -- Bug in :meth:`~DataFrame.to_csv` was silently failing when writing to an invalid s3 bucket. (:issue:`32486`) - Bug in :meth:`~DataFrame.read_feather` was raising an `ArrowIOError` when reading an s3 or http file path (:issue:`29055`) -- Bug in :meth:`read_parquet` was raising a ``FileNotFoundError`` when passed an s3 directory path. (:issue:`26388`) -- Bug in :meth:`~DataFrame.to_parquet` was throwing an ``AttributeError`` when writing a partitioned parquet file to s3 (:issue:`27596`) - Bug in :meth:`~DataFrame.to_excel` could not handle the column name `render` and was raising an ``KeyError`` (:issue:`34331`) - Bug in :meth:`~SQLDatabase.execute` was raising a ``ProgrammingError`` for some DB-API drivers when the SQL statement contained the `%` character and no parameters were present (:issue:`34211`) @@ -893,16 +884,9 @@ Groupby/resample/rolling - Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` produces inconsistent type when aggregating Boolean series (:issue:`32894`) - Bug in :meth:`DataFrameGroupBy.sum` and :meth:`SeriesGroupBy.sum` where a large negative number would be returned when the number of non-null values was below ``min_count`` for nullable integer dtypes (:issue:`32861`) - Bug in :meth:`SeriesGroupBy.quantile` raising on nullable integers (:issue:`33136`) -- Bug in :meth:`SeriesGroupBy.first`, :meth:`SeriesGroupBy.last`, :meth:`SeriesGroupBy.min`, and :meth:`SeriesGroupBy.max` returning floats when applied to nullable Booleans (:issue:`33071`) -- Bug in :meth:`DataFrameGroupBy.agg` with dictionary input losing ``ExtensionArray`` dtypes (:issue:`32194`) - Bug in :meth:`DataFrame.resample` where an ``AmbiguousTimeError`` would be raised when the resulting timezone aware :class:`DatetimeIndex` had a DST transition at midnight (:issue:`25758`) - Bug in :meth:`DataFrame.groupby` where a ``ValueError`` would be raised when grouping by a categorical column with read-only categories and ``sort=False`` (:issue:`33410`) -- Bug 
in :meth:`GroupBy.first` and :meth:`GroupBy.last` where None is not preserved in object dtype (:issue:`32800`) -- Bug in :meth:`GroupBy.quantile` causes the quantiles to be shifted when the ``by`` axis contains ``NaN`` (:issue:`33200`, :issue:`33569`) -- Bug in :meth:`Rolling.min` and :meth:`Rolling.max`: Growing memory usage after multiple calls when using a fixed window (:issue:`30726`) -- Bug in :meth:`Series.groupby` would raise ``ValueError`` when grouping by :class:`PeriodIndex` level (:issue:`34010`) - Bug in :meth:`GroupBy.agg`, :meth:`GroupBy.transform`, and :meth:`GroupBy.resample` where subclasses are not preserved (:issue:`28330`) -- Bug in :meth:`GroupBy.rolling.apply` ignores args and kwargs parameters (:issue:`33433`) - Bug in :meth:`core.groupby.DataFrameGroupBy.apply` where the output index shape for functions returning a DataFrame which is equally indexed to the input DataFrame is inconsistent. An internal heuristic to detect index mutation would behave differently for equal but not identical indices. In particular, the result index shape might change if a copy of the input would be returned. @@ -934,7 +918,6 @@ Reshaping returning subclassed types. 
(:issue:`31331`) - Bug in :func:`concat` was not allowing for concatenation of ``DataFrame`` and ``Series`` with duplicate keys (:issue:`33654`) - Bug in :func:`cut` raised an error when non-unique labels (:issue:`33141`) -- Bug in :meth:`DataFrame.replace` casts columns to ``object`` dtype if items in ``to_replace`` not in values (:issue:`32988`) - Ensure only named functions can be used in :func:`eval()` (:issue:`32460`) - Fixed bug in :func:`melt` where melting MultiIndex columns with ``col_level`` > 0 would raise a ``KeyError`` on ``id_vars`` (:issue:`34129`) @@ -973,7 +956,6 @@ Other - Bug in :meth:`DataFrame.__dir__` caused a segfault when using unicode surrogates in a column name (:issue:`25509`) - Bug in :meth:`DataFrame.plot.scatter` caused an error when plotting variable marker sizes (:issue:`32904`) - :class:`IntegerArray` now implements the ``sum`` operation (:issue:`33172`) -- More informative error message with ``np.min`` or ``np.max`` on unordered :class:`Categorical` (:issue:`33115`) - Bug in :class:`Tick` comparisons raising ``TypeError`` when comparing against timedelta-like objects (:issue:`34088`) .. ---------------------------------------------------------------------------
https://api.github.com/repos/pandas-dev/pandas/pulls/34430
2020-05-28T12:21:33Z
2020-05-28T18:13:52Z
2020-05-28T18:13:51Z
2020-06-01T15:47:51Z
Fix read_sql empty result with chunksize bug GH34411
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 196d2f2d968a7..0cb46a5164674 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -281,6 +281,7 @@ I/O - :func:`read_excel` now respects :func:`set_option` (:issue:`34252`) - Bug in :func:`read_csv` not switching ``true_values`` and ``false_values`` for nullable ``boolean`` dtype (:issue:`34655`) - Bug in :func:`read_json` when ``orient="split"`` does not maintain numeric string index (:issue:`28556`) +- :meth:`read_sql` returned an empty generator if ``chunksize`` was no-zero and the query returned no results. Now returns a generator with a single empty dataframe (:issue:`34411`) Period ^^^^^^ diff --git a/pandas/io/sql.py b/pandas/io/sql.py index bbc5e6ad82493..e1af3169420fc 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -926,11 +926,17 @@ def _query_iterator( parse_dates=None, ): """Return generator through chunked result set.""" + has_read_data = False while True: data = result.fetchmany(chunksize) if not data: + if not has_read_data: + yield DataFrame.from_records( + [], columns=columns, coerce_float=coerce_float + ) break else: + has_read_data = True self.frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float ) @@ -1343,11 +1349,21 @@ def _query_iterator( dtype: Optional[DtypeArg] = None, ): """Return generator through chunked result set""" + has_read_data = False while True: data = result.fetchmany(chunksize) if not data: + if not has_read_data: + yield _wrap_result( + [], + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + ) break else: + has_read_data = True yield _wrap_result( data, columns, @@ -1849,14 +1865,20 @@ def _query_iterator( dtype: Optional[DtypeArg] = None, ): """Return generator through chunked result set""" + has_read_data = False while True: data = cursor.fetchmany(chunksize) if type(data) == tuple: data = list(data) if not data: cursor.close() + if not 
has_read_data: + yield DataFrame.from_records( + [], columns=columns, coerce_float=coerce_float + ) break else: + has_read_data = True yield _wrap_result( data, columns, diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 6fb120faa6db2..b70bc3c598702 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -661,6 +661,12 @@ def test_read_sql_view(self): iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn) self._check_iris_loaded_frame(iris_frame) + def test_read_sql_with_chunksize_no_result(self): + query = "SELECT * FROM iris_view WHERE SepalLength < 0.0" + with_batch = sql.read_sql_query(query, self.conn, chunksize=5) + without_batch = sql.read_sql_query(query, self.conn) + tm.assert_frame_equal(pd.concat(with_batch), without_batch) + def test_to_sql(self): sql.to_sql(self.test_frame1, "test_frame1", self.conn) assert sql.has_table("test_frame1", self.conn)
- [x] closes #34411 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry `pd.read_sql` was returning an empty generator when chunksize is set and the query returns zero results. Now it correctly returns a generator with a single empty DataFrame (:issue:`34411`).
https://api.github.com/repos/pandas-dev/pandas/pulls/34429
2020-05-28T11:52:29Z
2021-01-14T18:52:47Z
2021-01-14T18:52:47Z
2021-01-14T18:52:51Z
DOC: 1.0.4 whatsnew
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index b5ac96752536e..b381dae3579c8 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -24,6 +24,7 @@ Version 1.0 .. toctree:: :maxdepth: 2 + v1.0.4 v1.0.3 v1.0.2 v1.0.1 diff --git a/doc/source/whatsnew/v1.0.3.rst b/doc/source/whatsnew/v1.0.3.rst index 26d06433bda0c..62e6ae5b1c5cc 100644 --- a/doc/source/whatsnew/v1.0.3.rst +++ b/doc/source/whatsnew/v1.0.3.rst @@ -26,4 +26,4 @@ Bug fixes Contributors ~~~~~~~~~~~~ -.. contributors:: v1.0.2..v1.0.3|HEAD +.. contributors:: v1.0.2..v1.0.3 diff --git a/doc/source/whatsnew/v1.0.4.rst b/doc/source/whatsnew/v1.0.4.rst new file mode 100644 index 0000000000000..5cc1edc9ca9cd --- /dev/null +++ b/doc/source/whatsnew/v1.0.4.rst @@ -0,0 +1,48 @@ + +.. _whatsnew_104: + +What's new in 1.0.4 (May 28, 2020) +------------------------------------ + +These are the changes in pandas 1.0.4. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_104.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ +- Fix regression where :meth:`Series.isna` and :meth:`DataFrame.isna` would raise for categorical dtype when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`33594`) +- Fix regression in :meth:`GroupBy.first` and :meth:`GroupBy.last` where None is not preserved in object dtype (:issue:`32800`) +- Fix regression in DataFrame reductions using ``numeric_only=True`` and ExtensionArrays (:issue:`33256`). 
+- Fix performance regression in ``memory_usage(deep=True)`` for object dtype (:issue:`33012`) +- Fix regression where :meth:`Categorical.replace` would replace with ``NaN`` whenever the new value and replacement value were equal (:issue:`33288`) +- Fix regression where an ordered :class:`Categorical` containing only ``NaN`` values would raise rather than returning ``NaN`` when taking the minimum or maximum (:issue:`33450`) +- Fix regression in :meth:`DataFrameGroupBy.agg` with dictionary input losing ``ExtensionArray`` dtypes (:issue:`32194`) +- Fix to preserve the ability to index with the "nearest" method with xarray's CFTimeIndex, an :class:`Index` subclass (`pydata/xarray#3751 <https://github.com/pydata/xarray/issues/3751>`_, :issue:`32905`). +- Fix regression in :meth:`DataFrame.describe` raising ``TypeError: unhashable type: 'dict'`` (:issue:`32409`) +- Fix regression in :meth:`DataFrame.replace` casts columns to ``object`` dtype if items in ``to_replace`` not in values (:issue:`32988`) +- Fix regression in :meth:`Series.groupby` would raise ``ValueError`` when grouping by :class:`PeriodIndex` level (:issue:`34010`) +- Fix regression in :meth:`GroupBy.rolling.apply` ignores args and kwargs parameters (:issue:`33433`) +- Fix regression in error message with ``np.min`` or ``np.max`` on unordered :class:`Categorical` (:issue:`33115`) +- Fix regression in :meth:`DataFrame.loc` and :meth:`Series.loc` throwing an error when a ``datetime64[ns, tz]`` value is provided (:issue:`32395`) + +.. 
_whatsnew_104.bug_fixes: + +Bug fixes +~~~~~~~~~ +- Bug in :meth:`SeriesGroupBy.first`, :meth:`SeriesGroupBy.last`, :meth:`SeriesGroupBy.min`, and :meth:`SeriesGroupBy.max` returning floats when applied to nullable Booleans (:issue:`33071`) +- Bug in :meth:`Rolling.min` and :meth:`Rolling.max`: Growing memory usage after multiple calls when using a fixed window (:issue:`30726`) +- Bug in :meth:`~DataFrame.to_parquet` was not raising ``PermissionError`` when writing to a private s3 bucket with invalid creds. (:issue:`27679`) +- Bug in :meth:`~DataFrame.to_csv` was silently failing when writing to an invalid s3 bucket. (:issue:`32486`) +- Bug in :meth:`read_parquet` was raising a ``FileNotFoundError`` when passed an s3 directory path. (:issue:`26388`) +- Bug in :meth:`~DataFrame.to_parquet` was throwing an ``AttributeError`` when writing a partitioned parquet file to s3 (:issue:`27596`) +- Bug in :meth:`GroupBy.quantile` causes the quantiles to be shifted when the ``by`` axis contains ``NaN`` (:issue:`33200`, :issue:`33569`) + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.0.3..v1.0.4|HEAD
Brings 1.0.4.rst to master & sets the release date (for tomorrow) cc @simonjayhawkins.
https://api.github.com/repos/pandas-dev/pandas/pulls/34428
2020-05-28T11:38:59Z
2020-05-28T12:54:26Z
2020-05-28T12:54:26Z
2020-05-28T12:55:01Z
Categorical.(get|from)_dummies
diff --git a/doc/source/user_guide/categorical.rst b/doc/source/user_guide/categorical.rst index b7475ae7bb132..c8bdc2394ddf9 100644 --- a/doc/source/user_guide/categorical.rst +++ b/doc/source/user_guide/categorical.rst @@ -127,6 +127,45 @@ This conversion is likewise done column by column: df_cat['A'] df_cat['B'] +Dummy / indicator / one-hot encoded variables +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Some operations, like regression and classification, +encodes a single categorical variable as a column for each category, +with each row having False in all but one column (True). +These are called `dummy variables <https://en.wikipedia.org/wiki/Dummy_variable_(statistics)>`_, or one-hot encoding. +:class:`pandas.Categorical` objects can easily be converted to and from such an encoding. + +:meth:`pandas.Categorical.get_dummies` produces a dataframe of dummy variables. +It works in the same way and supports most of the same arguments as :func:`pandas.get_dummies`. + +.. ipython:: python + + cat = pd.Categorical(["a", "b", "b", "c"]) + cat + + cat.get_dummies() + +The :meth:`pandas.Categorical.from_dummies` class method accepts a dataframe +whose dtypes are coercible to boolean, and an ``ordered`` argument +for whether the resulting ``Categorical`` should be considered ordered +(like the ``Categorical`` constructor). +A column with a NA index will be ignored. +Any row which is entirely falsey, or has a missing value, +will be uncategorised. +In the same way that :func:`pandas.get_dummies` can add a prefix to string category names, +:meth:`~pandas.Categorical.from_dummies` can filter a dataframe for columns with a prefix: +the resulting ``Categorical`` will have the prefix stripped from its categories. + +.. ipython:: python + + dummies = pd.get_dummies(["a", "b", "b", "c"], prefix="cat") + dummies + + pd.Categorical.from_dummies(dummies, prefix="cat") + + +.. 
versionadded:: 1.2.0 Controlling behavior ~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst index 1b90aeb00cf9c..a666bbd885baf 100644 --- a/doc/source/user_guide/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -606,7 +606,7 @@ This function is often used along with discretization functions like ``cut``: pd.get_dummies(pd.cut(values, bins)) -See also :func:`Series.str.get_dummies <pandas.Series.str.get_dummies>`. +See also :func:`Series.str.get_dummies <pandas.Series.str.get_dummies>` and :func:`Categorical.get_dummies <pandas.Categorical.get_dummies>`. :func:`get_dummies` also accepts a ``DataFrame``. By default all categorical variables (categorical in the statistical sense, those with `object` or @@ -679,6 +679,15 @@ To choose another dtype, use the ``dtype`` argument: pd.get_dummies(df, dtype=bool).dtypes +A :class:`~pandas.Categorical` can be recovered from a :class:`~pandas.DataFrame` of such dummy variables using :meth:`~pandas.Categorical.from_dummies`. +Use the ``prefix`` and ``prefix_sep`` arguments to select and rename columns which have had a prefix applied in the same way as :class:`~pandas.get_dummies` does. + +.. ipython:: python + + df = pd.get_dummies(list("abca")) + + pd.Categorical.from_dummies(df) + .. 
_reshaping.factorize: diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 6a5b4b3b9ff16..c3ac951eb51b1 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -120,6 +120,7 @@ Other enhancements - `Styler` now allows direct CSS class name addition to individual data cells (:issue:`36159`) - :meth:`Rolling.mean()` and :meth:`Rolling.sum()` use Kahan summation to calculate the mean to avoid numerical problems (:issue:`10319`, :issue:`11645`, :issue:`13254`, :issue:`32761`, :issue:`36031`) - :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with datetimelike dtypes will now try to cast string arguments (listlike and scalar) to the matching datetimelike type (:issue:`36346`) +- :meth:`Categorical.from_dummies` and :meth:`Categorical.get_dummies` convert between :class:`Categorical` and :class:`DataFrame` objects of dummy variables. .. _whatsnew_120.api_breaking.python: diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index ef69d6565cfeb..224e336fae9dd 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2,7 +2,7 @@ from functools import partial import operator from shutil import get_terminal_size -from typing import Dict, Hashable, List, Type, Union, cast +from typing import TYPE_CHECKING, Any, Dict, Hashable, List, Optional, Type, Union, cast from warnings import warn import numpy as np @@ -55,6 +55,9 @@ from pandas.io.formats import console +if TYPE_CHECKING: + from pandas._typing import DataFrame # noqa: F401 + def _cat_compare_op(op): opname = f"__{op.__name__}__" @@ -370,6 +373,221 @@ def __init__( self._dtype = self._dtype.update_dtype(dtype) self._codes = coerce_indexer_dtype(codes, dtype.categories) + @classmethod + def from_dummies( + cls, + dummies: "DataFrame", + ordered: Optional[bool] = None, + prefix: Optional[str] = None, + prefix_sep: 
str = "_", + fillna: Optional[bool] = None, + ) -> "Categorical": + """Create a `Categorical` using a ``DataFrame`` of dummy variables. + + Can use a subset of columns based on the ``prefix`` + and ``prefix_sep`` parameters. + + The ``DataFrame`` must have no more than one truthy value per row. + The columns of the ``DataFrame`` become the categories of the `Categorical`. + A column whose header is NA will be dropped: + any row containing a NA value will be uncategorised. + + Parameters + ---------- + dummies : DataFrame + dtypes of columns with non-NA headers must be coercible to bool. + Sparse dataframes are not supported. + ordered : bool + Whether or not this Categorical is ordered. + prefix : optional str + Only take columns whose names are strings starting + with this prefix and ``prefix_sep``, + stripping those elements from the resulting category names. + prefix_sep : str, default "_" + If ``prefix`` is not ``None``, use as the separator + between the prefix and the final name of the category. + fillna : optional bool, default None + How to handle NA values. + If ``True`` or ``False``, NA is filled with that value. + If ``None``, raise a ValueError if there are any NA values. + + Raises + ------ + ValueError + If a sample belongs to >1 category + + Returns + ------- + Categorical + + Examples + -------- + >>> simple = pd.DataFrame(np.eye(3), columns=["a", "b", "c"]) + >>> Categorical.from_dummies(simple) + [a, b, c] + Categories (3, object): [a, b, c] + + >>> nan_col = pd.DataFrame(np.eye(4), columns=["a", "b", np.nan, None]) + >>> Categorical.from_dummies(nan_col) + [a, b, NaN, NaN] + Categories (2, object): [a, b] + + >>> nan_cell = pd.DataFrame( + ... [[1, 0, np.nan], [0, 1, 0], [0, 0, 1]], + ... columns=["a", "b", "c"], + ... ) + >>> Categorical.from_dummies(nan_cell) + [NaN, b, c] + Categories (3, object): [a, b, c] + + >>> multi = pd.DataFrame( + ... [[1, 0, 1], [0, 1, 0], [0, 0, 1]], + ... columns=["a", "b", "c"], + ... 
) + >>> Categorical.from_dummies(multi) + Traceback (most recent call last): + ... + ValueError: 1 record(s) belongs to multiple categories: [0] + """ + from pandas import Series + + to_drop = dummies.columns[isna(dummies.columns)] + if len(to_drop): + dummies = dummies.drop(columns=to_drop) + + cats: List[Any] + if prefix is None: + cats = list(dummies.columns) + else: + pref = prefix + (prefix_sep or "") + cats = [] + to_keep: List[str] = [] + for c in dummies.columns: + if isinstance(c, str) and c.startswith(pref): + to_keep.append(c) + cats.append(c[len(pref) :]) + dummies = dummies[to_keep] + + df = dummies.astype("boolean") + if fillna is not None: + df = df.fillna(fillna) + + row_totals = df.sum(axis=1, skipna=False) + if row_totals.isna().any(): + raise ValueError("Unhandled NA values in dummy array") + + multicat_rows = row_totals > 1 + if multicat_rows.any(): + raise ValueError( + f"{multicat_rows.sum()} record(s) belongs to multiple categories: " + f"{list(df.index[multicat_rows])}" + ) + + codes = Series(np.full(len(row_totals), np.nan), index=df.index, dtype="Int64") + codes[row_totals == 0] = -1 + row_idx, code = np.nonzero(df) + codes[row_idx] = code + + return cls.from_codes(codes.fillna(-1), cats, ordered=ordered) + + def get_dummies( + self, + prefix: Optional[str] = None, + prefix_sep: str = "_", + dummy_na: bool = False, + sparse: bool = False, + drop_first: bool = False, + dtype: Dtype = None, + ) -> "DataFrame": + """ + Convert into dummy/indicator variables. + + Parameters + ---------- + prefix : str, default None + String to append DataFrame column names. + prefix_sep : str, default '_' + If appending prefix, separator/delimiter to use. + dummy_na : bool, default False + Add a column to indicate NaNs, if False NaNs are ignored. + sparse : bool, default False + Whether the dummy-encoded columns should be backed by + a :class:`SparseArray` (True) or a regular NumPy array (False). 
+ drop_first : bool, default False + Whether to get k-1 dummies out of k categorical levels by removing the + first level. + dtype : dtype, default np.uint8 + Data type for new columns. Only a single dtype is allowed. + + Returns + ------- + DataFrame + Dummy-coded data. + + See Also + -------- + Series.str.get_dummies : Convert Series to dummy codes. + pandas.get_dummies : Convert categorical variable to dummy/indicator variables. + + Examples + -------- + >>> s = pd.Categorical(list('abca')) + + >>> s.get_dummies() + a b c + 0 1 0 0 + 1 0 1 0 + 2 0 0 1 + 3 1 0 0 + + >>> s1 = pd.Categorical(['a', 'b', np.nan]) + + >>> s1.get_dummies() + a b + 0 1 0 + 1 0 1 + 2 0 0 + + >>> s1.get_dummies(dummy_na=True) + a b NaN + 0 1 0 0 + 1 0 1 0 + 2 0 0 1 + + >>> pd.Categorical(list('abcaa')).get_dummies() + a b c + 0 1 0 0 + 1 0 1 0 + 2 0 0 1 + 3 1 0 0 + 4 1 0 0 + + >>> pd.Categorical(list('abcaa')).get_dummies(drop_first=True) + b c + 0 0 0 + 1 1 0 + 2 0 1 + 3 0 0 + 4 0 0 + + >>> pd.Categorical(list('abc')).get_dummies(dtype=float) + a b c + 0 1.0 0.0 0.0 + 1 0.0 1.0 0.0 + 2 0.0 0.0 1.0 + """ + from pandas import get_dummies + + return get_dummies( + self, + prefix=prefix, + prefix_sep=prefix_sep, + dummy_na=dummy_na, + sparse=sparse, + drop_first=drop_first, + dtype=dtype, + ) + @property def dtype(self) -> CategoricalDtype: """ diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 18ebe14763797..be5a39a9f90d4 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -768,6 +768,7 @@ def get_dummies( See Also -------- Series.str.get_dummies : Convert Series to dummy codes. + Categorical.get_dummies : Convert a Categorical array to dummy codes. 
Examples -------- diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py index 6fce4b4145ff2..d47841618d6f0 100644 --- a/pandas/tests/arrays/categorical/test_api.py +++ b/pandas/tests/arrays/categorical/test_api.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series +from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series, get_dummies import pandas._testing as tm from pandas.core.arrays.categorical import recode_for_categories from pandas.tests.arrays.categorical.common import TestCategorical @@ -399,6 +399,36 @@ def test_remove_unused_categories(self): out = cat.remove_unused_categories() assert out.tolist() == val.tolist() + @pytest.mark.parametrize( + "vals", + [ + ["a", "b", "b", "a"], + ["a", "b", "b", "a", np.nan], + [1, 1.5, "a", (1, "b")], + [1, 1.5, "a", (1, "b"), np.nan], + ], + ) + def test_get_dummies(self, vals): + # GH 8745 + cats = Categorical(Series(vals)) + tm.assert_equal(cats.get_dummies(), get_dummies(cats)) + + @pytest.mark.parametrize( + "vals", + [ + ["a", "b", "b", "a"], + ["a", "b", "b", "a", np.nan], + [1, 1.5, "a", (1, "b")], + [1, 1.5, "a", (1, "b"), np.nan], + ], + ) + def test_dummies_roundtrip(self, vals): + # GH 8745 + cats = Categorical(Series(vals)) + dummies = cats.get_dummies() + cats2 = Categorical.from_dummies(dummies) + tm.assert_equal(cats, cats2) + class TestCategoricalAPIWithFactor(TestCategorical): def test_describe(self): diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index e200f13652a84..b4c3fe55133ae 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -10,6 +10,7 @@ from pandas import ( Categorical, CategoricalIndex, + DataFrame, DatetimeIndex, Index, Interval, @@ -19,6 +20,7 @@ Series, Timestamp, date_range, + get_dummies, 
period_range, timedelta_range, ) @@ -682,3 +684,59 @@ def test_interval(self): expected_codes = np.array([0, 1], dtype="int8") tm.assert_numpy_array_equal(cat.codes, expected_codes) tm.assert_index_equal(cat.categories, idx) + + @pytest.mark.parametrize("sparse", [True, False]) + def test_from_dummies(self, sparse): + if sparse: + pytest.xfail("from sparse is not supported") + # GH 8745 + raw = ["a", "a", "b", "c", "c", "a"] + dummies = get_dummies(raw, sparse=sparse) + cats = Categorical.from_dummies(dummies) + assert list(cats) == raw + + @pytest.mark.parametrize("na_val", [np.nan, pd.NA, None, pd.NaT]) + def test_from_dummies_nan(self, na_val): + # GH 8745 + raw = ["a", "a", "b", "c", "c", "a", na_val] + dummies = get_dummies(raw) + cats = Categorical.from_dummies(dummies) + assert list(cats)[:-1] == raw[:-1] + assert pd.isna(list(cats)[-1]) + + def test_from_dummies_multiple(self): + # GH 8745 + dummies = DataFrame([[1, 0, 1], [0, 1, 0], [0, 0, 1]], columns=["a", "b", "c"]) + with pytest.raises(ValueError, match="multiple categories"): + Categorical.from_dummies(dummies) + + @pytest.mark.parametrize("ordered", [None, False, True]) + def test_from_dummies_ordered(self, ordered): + # GH 8745 + raw = ["a", "a", "b", "c", "c", "a"] + dummies = get_dummies(raw) + cats = Categorical.from_dummies(dummies, ordered) + assert cats.ordered == bool(ordered) + + def test_from_dummies_types(self): + # GH 8745 + cols = ["a", 1, 1.5, ("a", "b"), (1, "c")] + dummies = DataFrame(np.eye(len(cols)), columns=cols) + cats = Categorical.from_dummies(dummies) + assert list(cats) == cols + + def test_from_dummies_drops_na(self): + # GH 8745 + cols = ["a", "b", np.nan] + dummies = DataFrame(np.eye(len(cols)), columns=cols) + cats = Categorical.from_dummies(dummies) + assert list(cats.categories) == cols[:-1] + assert pd.isna(cats[-1]) + + def test_from_dummies_multiindex(self): + # GH 8745 + tups = [("a", 1), ("a", 2), ("b", 1), ("b", 2)] + cols = MultiIndex.from_tuples(tups) + dummies 
= DataFrame(np.eye(len(cols)), columns=cols) + cats = Categorical.from_dummies(dummies) + assert list(cats.categories) == tups
Simply converting categorical variables to and from dummy variables. - [x] closes #8745 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Intentionally smaller-scoped than #31795 (and indeed `get_dummies`) as a broadly useful MVP which can be chained with other basic functionality. The tests are fairly rudimentary and I welcome any edge cases which should be picked out. ## For discussion ### from_dummies * class method rather than free function: Keeps categorical-related functionality together, reduces surface in the global namespace, more obvious what is produced. * silently drop column with NA header: wasn't sure about this. Maybe it should raise a warning? * No handling of masked dataframes or dataframes with NA values * No subsetting or renaming of columns: callers can do this themselves ### to_dummies * Name: I went for `Categorical.to_dummies` instead of matching the free function `get_dummies`. The symmetry of to/from aids understanding, and using `get_` might imply A) that something is being got, which it isn't or B) signature/ feature parity with the existing method, which wasn't a design goal for me. * `to_dummies` return type: `cls.to_dummies` returns bools, where `get_dummies` returns uint8s by default, which doesn't make a lot of sense to me as we are representing boolean data (and they're the same in memory anyway). Dummy variables are generally used for regression where a continuous variable is required, so ints don't get us any closer to what we may want, and being able to index into the categories using the row may be valuable. * No `dtype` argument: I didn't see any benefit of `cls.to_dummies(dtype=float)` over `cls.to_dummies().astype(float)`. The latter is more explicit, no slower, and minimises API surface. * No `prefix`, `prefix_sep` arguments: these unnecessarily assume string column headers. 
If someone wants to rename their columns, they can: IMO it's not a core requirement of this method. * `dumma_na` replaced with `na_column`: if we're including the argument, we may as well let the user decide what they want to call their column, using the dtype they prefer (e.g. `"other"`, `-1` etc). They can always supply `np.nan` if they want `get_dummies`-like behaviour. * No `sparse` argument: This I regret. Producing a sparse array would be valuable. However, it would drastically complicate the method, so I left it out for the MVP. The caller can always sparsify it after it's produced, so long as RAM isn't an issue for the temporary df. * No `drop_first` argument: If someone wants to drop one of their columns, they can (`cls.to_dummies().drop(columns="my_col")`): again, not a core requirement of this method. Broadly speaking, I'm not in favour of adding arguments to save the caller <=1 line of their own code unless there are e.g. speed gains.
https://api.github.com/repos/pandas-dev/pandas/pulls/34426
2020-05-28T10:40:18Z
2021-03-21T00:30:25Z
null
2021-03-21T00:30:25Z
DOC: 1.0.4 release notes and date
diff --git a/doc/source/whatsnew/v1.0.4.rst b/doc/source/whatsnew/v1.0.4.rst index 1f2d4e2dba370..5cc1edc9ca9cd 100644 --- a/doc/source/whatsnew/v1.0.4.rst +++ b/doc/source/whatsnew/v1.0.4.rst @@ -1,7 +1,7 @@ .. _whatsnew_104: -What's new in 1.0.4 (May ??, 2020) +What's new in 1.0.4 (May 28, 2020) ------------------------------------ These are the changes in pandas 1.0.4. See :ref:`release` for a full changelog @@ -15,21 +15,20 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ -- Bug where :meth:`Series.isna` and :meth:`DataFrame.isna` would raise for categorical dtype when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`33594`) -- Bug in :meth:`GroupBy.first` and :meth:`GroupBy.last` where None is not preserved in object dtype (:issue:`32800`) -- Bug in DataFrame reductions using ``numeric_only=True`` and ExtensionArrays (:issue:`33256`). +- Fix regression where :meth:`Series.isna` and :meth:`DataFrame.isna` would raise for categorical dtype when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`33594`) +- Fix regression in :meth:`GroupBy.first` and :meth:`GroupBy.last` where None is not preserved in object dtype (:issue:`32800`) +- Fix regression in DataFrame reductions using ``numeric_only=True`` and ExtensionArrays (:issue:`33256`). 
- Fix performance regression in ``memory_usage(deep=True)`` for object dtype (:issue:`33012`) -- Bug where :meth:`Categorical.replace` would replace with ``NaN`` whenever the new value and replacement value were equal (:issue:`33288`) -- Bug where an ordered :class:`Categorical` containing only ``NaN`` values would raise rather than returning ``NaN`` when taking the minimum or maximum (:issue:`33450`) -- Bug in :meth:`DataFrameGroupBy.agg` with dictionary input losing ``ExtensionArray`` dtypes (:issue:`32194`) +- Fix regression where :meth:`Categorical.replace` would replace with ``NaN`` whenever the new value and replacement value were equal (:issue:`33288`) +- Fix regression where an ordered :class:`Categorical` containing only ``NaN`` values would raise rather than returning ``NaN`` when taking the minimum or maximum (:issue:`33450`) +- Fix regression in :meth:`DataFrameGroupBy.agg` with dictionary input losing ``ExtensionArray`` dtypes (:issue:`32194`) - Fix to preserve the ability to index with the "nearest" method with xarray's CFTimeIndex, an :class:`Index` subclass (`pydata/xarray#3751 <https://github.com/pydata/xarray/issues/3751>`_, :issue:`32905`). 
- Fix regression in :meth:`DataFrame.describe` raising ``TypeError: unhashable type: 'dict'`` (:issue:`32409`) -- Bug in :meth:`DataFrame.replace` casts columns to ``object`` dtype if items in ``to_replace`` not in values (:issue:`32988`) -- Bug in :meth:`Series.groupby` would raise ``ValueError`` when grouping by :class:`PeriodIndex` level (:issue:`34010`) -- Bug in :meth:`GroupBy.rolling.apply` ignores args and kwargs parameters (:issue:`33433`) -- More informative error message with ``np.min`` or ``np.max`` on unordered :class:`Categorical` (:issue:`33115`) -- Fixed regression in :meth:`DataFrame.loc` and :meth:`Series.loc` throwing an error when a ``datetime64[ns, tz]`` value is provided (:issue:`32395`) -- +- Fix regression in :meth:`DataFrame.replace` casts columns to ``object`` dtype if items in ``to_replace`` not in values (:issue:`32988`) +- Fix regression in :meth:`Series.groupby` would raise ``ValueError`` when grouping by :class:`PeriodIndex` level (:issue:`34010`) +- Fix regression in :meth:`GroupBy.rolling.apply` ignores args and kwargs parameters (:issue:`33433`) +- Fix regression in error message with ``np.min`` or ``np.max`` on unordered :class:`Categorical` (:issue:`33115`) +- Fix regression in :meth:`DataFrame.loc` and :meth:`Series.loc` throwing an error when a ``datetime64[ns, tz]`` value is provided (:issue:`32395`) .. _whatsnew_104.bug_fixes: @@ -42,7 +41,6 @@ Bug fixes - Bug in :meth:`read_parquet` was raising a ``FileNotFoundError`` when passed an s3 directory path. (:issue:`26388`) - Bug in :meth:`~DataFrame.to_parquet` was throwing an ``AttributeError`` when writing a partitioned parquet file to s3 (:issue:`27596`) - Bug in :meth:`GroupBy.quantile` causes the quantiles to be shifted when the ``by`` axis contains ``NaN`` (:issue:`33200`, :issue:`33569`) -- Contributors ~~~~~~~~~~~~
https://api.github.com/repos/pandas-dev/pandas/pulls/34425
2020-05-28T10:35:29Z
2020-05-28T11:38:16Z
2020-05-28T11:38:16Z
2020-05-28T11:38:23Z
DOC: intersphinx inventory link for statsmodels
diff --git a/doc/source/conf.py b/doc/source/conf.py index e8d825a509be9..2db09841db2f8 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -413,7 +413,7 @@ "py": ("https://pylib.readthedocs.io/en/latest/", None), "python": ("https://docs.python.org/3/", None), "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None), - "statsmodels": ("http://www.statsmodels.org/devel/", None), + "statsmodels": ("https://www.statsmodels.org/devel/", None), } # extlinks alias
intersphinx inventory has moved: http://www.statsmodels.org/devel/objects.inv -> https://www.statsmodels.org/devel/objects.inv EDIT: this is already change on master. this PR directly against 1.0.x
https://api.github.com/repos/pandas-dev/pandas/pulls/34424
2020-05-28T08:53:27Z
2020-05-28T09:41:36Z
2020-05-28T09:41:36Z
2020-05-28T09:41:45Z
BUG: Fix failing MacPython 32bit wheels for groupby rolling
diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index 263502e3e26dc..f0a76dc17b411 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -218,16 +218,18 @@ def get_window_bounds( start, end = indexer.get_window_bounds( len(indicies), min_periods, center, closed ) + start = start.astype(np.int64) + end = end.astype(np.int64) # Cannot use groupby_indicies as they might not be monotonic with the object # we're rolling over window_indicies = np.arange( - window_indicies_start, - window_indicies_start + len(indicies), - dtype=np.int64, + window_indicies_start, window_indicies_start + len(indicies), ) window_indicies_start += len(indicies) # Extend as we'll be slicing window like [start, end) - window_indicies = np.append(window_indicies, [window_indicies[-1] + 1]) + window_indicies = np.append( + window_indicies, [window_indicies[-1] + 1] + ).astype(np.int64) start_arrays.append(window_indicies.take(start)) end_arrays.append(window_indicies.take(end)) start = np.concatenate(start_arrays) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 3985cd8927b27..b06128052fa8f 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2228,7 +2228,10 @@ def _create_blocks(self, obj: FrameOrSeries): """ # Ensure the object we're rolling over is monotonically sorted relative # to the groups - obj = obj.take(np.concatenate(list(self._groupby.grouper.indices.values()))) + groupby_order = np.concatenate( + list(self._groupby.grouper.indices.values()) + ).astype(np.int64) + obj = obj.take(groupby_order) return super()._create_blocks(obj) def _get_cython_func_type(self, func: str) -> Callable:
- [x] closes #34410 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` What would be the best way to validate that this fixes the MacPython/pandas-wheels @TomAugspurger
https://api.github.com/repos/pandas-dev/pandas/pulls/34423
2020-05-28T04:26:34Z
2020-05-29T19:05:06Z
2020-05-29T19:05:06Z
2020-05-29T19:08:49Z
BUG: taking slices in _slice_take_blocks_ax0
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index e43c7bf887bcc..8e16d31b49150 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1337,7 +1337,8 @@ def _slice_take_blocks_ax0( # When filling blknos, make sure blknos is updated before appending to # blocks list, that way new blkno is exactly len(blocks). blocks = [] - for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=True): + group = not only_slice + for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group): if blkno == -1: # If we've got here, fill_value was not lib.no_default diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index b8ca5f16e4060..993b644c6993b 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1514,3 +1514,16 @@ def test_dataframe_series_extension_dtypes(): tm.assert_frame_equal(result, expected) result = df_ea + ser.astype("Int64") tm.assert_frame_equal(result, expected) + + +def test_dataframe_blockwise_slicelike(): + # GH#34367 + arr = np.random.randint(0, 1000, (100, 10)) + df1 = pd.DataFrame(arr) + df2 = df1.copy() + df2.iloc[0, [1, 3, 7]] = np.nan + + res = df1 + df2 + + expected = pd.DataFrame({i: df1[i] + df2[i] for i in df1.columns}) + tm.assert_frame_equal(res, expected)
- [x] closes #34367 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34421
2020-05-28T03:16:31Z
2020-05-28T17:19:47Z
2020-05-28T17:19:47Z
2021-11-20T23:23:10Z
REF: move to_offset to liboffsets
diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx index 13edf3c46152a..8246e24319dbd 100644 --- a/pandas/_libs/tslibs/frequencies.pyx +++ b/pandas/_libs/tslibs/frequencies.pyx @@ -1,22 +1,17 @@ -import re - cimport numpy as cnp cnp.import_array() from pandas._libs.tslibs.util cimport is_integer_object from pandas._libs.tslibs.offsets cimport is_offset_object - -# ---------------------------------------------------------------------- -# Constants - -# hack to handle WOM-1MON -opattern = re.compile( - r'([+\-]?\d*|[+\-]?\d*\.\d*)\s*([A-Za-z]+([\-][\dA-Za-z\-]+)?)' +from pandas._libs.tslibs.offsets import ( + INVALID_FREQ_ERR_MSG, + _dont_uppercase, + _lite_rule_alias, + base_and_stride, + opattern, ) -INVALID_FREQ_ERR_MSG = "Invalid frequency: {0}" - # --------------------------------------------------------------------- # Period codes @@ -103,27 +98,6 @@ _period_code_map.update({ "W": 4000, # Weekly "C": 5000}) # Custom Business Day -_lite_rule_alias = { - 'W': 'W-SUN', - 'Q': 'Q-DEC', - - 'A': 'A-DEC', # YearEnd(month=12), - 'Y': 'A-DEC', - 'AS': 'AS-JAN', # YearBegin(month=1), - 'YS': 'AS-JAN', - 'BA': 'BA-DEC', # BYearEnd(month=12), - 'BY': 'BA-DEC', - 'BAS': 'BAS-JAN', # BYearBegin(month=1), - 'BYS': 'BAS-JAN', - - 'Min': 'T', - 'min': 'T', - 'ms': 'L', - 'us': 'U', - 'ns': 'N'} - -_dont_uppercase = {'MS', 'ms'} - # Map attribute-name resolutions to resolution abbreviations _attrname_to_abbrevs = { "year": "A", @@ -223,36 +197,6 @@ cpdef get_freq_code(freqstr): return code, stride -cpdef base_and_stride(str freqstr): - """ - Return base freq and stride info from string representation - - Returns - ------- - base : str - stride : int - - Examples - -------- - _freq_and_stride('5Min') -> 'Min', 5 - """ - groups = opattern.match(freqstr) - - if not groups: - raise ValueError(f"Could not evaluate {freqstr}") - - stride = groups.group(1) - - if len(stride): - stride = int(stride) - else: - stride = 1 - - base = groups.group(2) - - 
return base, stride - - cpdef _period_str_to_code(str freqstr): freqstr = _lite_rule_alias.get(freqstr, freqstr) diff --git a/pandas/_libs/tslibs/offsets.pxd b/pandas/_libs/tslibs/offsets.pxd index e75cd8bdf1baf..69b878c77f0b8 100644 --- a/pandas/_libs/tslibs/offsets.pxd +++ b/pandas/_libs/tslibs/offsets.pxd @@ -1,3 +1,3 @@ -cdef to_offset(object obj) +cpdef to_offset(object obj) cdef bint is_offset_object(object obj) cdef bint is_tick_object(object obj) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 8e5634253bd39..a32ffb8aa3689 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1,6 +1,7 @@ import cython import operator +import re import time from typing import Any import warnings @@ -103,17 +104,6 @@ cdef bint is_tick_object(object obj): return isinstance(obj, Tick) -cdef to_offset(object obj): - """ - Wrap pandas.tseries.frequencies.to_offset to keep centralize runtime - imports - """ - if isinstance(obj, BaseOffset): - return obj - from pandas.tseries.frequencies import to_offset - return to_offset(obj) - - cdef datetime _as_datetime(datetime obj): if isinstance(obj, ABCTimestamp): return obj.to_pydatetime() @@ -3505,6 +3495,9 @@ CBMonthEnd = CustomBusinessMonthEnd CBMonthBegin = CustomBusinessMonthBegin CDay = CustomBusinessDay +# ---------------------------------------------------------------------- +# to_offset helpers + prefix_mapping = { offset._prefix: offset for offset in [ @@ -3542,6 +3535,224 @@ prefix_mapping = { ] } +_name_to_offset_map = { + "days": Day(1), + "hours": Hour(1), + "minutes": Minute(1), + "seconds": Second(1), + "milliseconds": Milli(1), + "microseconds": Micro(1), + "nanoseconds": Nano(1), +} + +# hack to handle WOM-1MON +opattern = re.compile( + r"([+\-]?\d*|[+\-]?\d*\.\d*)\s*([A-Za-z]+([\-][\dA-Za-z\-]+)?)" +) + +_lite_rule_alias = { + "W": "W-SUN", + "Q": "Q-DEC", + + "A": "A-DEC", # YearEnd(month=12), + "Y": "A-DEC", + "AS": "AS-JAN", # YearBegin(month=1), + 
"YS": "AS-JAN", + "BA": "BA-DEC", # BYearEnd(month=12), + "BY": "BA-DEC", + "BAS": "BAS-JAN", # BYearBegin(month=1), + "BYS": "BAS-JAN", + + "Min": "T", + "min": "T", + "ms": "L", + "us": "U", + "ns": "N", +} + +_dont_uppercase = {"MS", "ms"} + +INVALID_FREQ_ERR_MSG = "Invalid frequency: {0}" + +# TODO: still needed? +# cache of previously seen offsets +_offset_map = {} + + +cpdef base_and_stride(str freqstr): + """ + Return base freq and stride info from string representation + + Returns + ------- + base : str + stride : int + + Examples + -------- + _freq_and_stride('5Min') -> 'Min', 5 + """ + groups = opattern.match(freqstr) + + if not groups: + raise ValueError(f"Could not evaluate {freqstr}") + + stride = groups.group(1) + + if len(stride): + stride = int(stride) + else: + stride = 1 + + base = groups.group(2) + + return base, stride + + +# TODO: better name? +def _get_offset(name: str) -> BaseOffset: + """ + Return DateOffset object associated with rule name. + + Examples + -------- + _get_offset('EOM') --> BMonthEnd(1) + """ + if name not in _dont_uppercase: + name = name.upper() + name = _lite_rule_alias.get(name, name) + name = _lite_rule_alias.get(name.lower(), name) + else: + name = _lite_rule_alias.get(name, name) + + if name not in _offset_map: + try: + split = name.split("-") + klass = prefix_mapping[split[0]] + # handles case where there's no suffix (and will TypeError if too + # many '-') + offset = klass._from_name(*split[1:]) + except (ValueError, TypeError, KeyError) as err: + # bad prefix or suffix + raise ValueError(INVALID_FREQ_ERR_MSG.format(name)) from err + # cache + _offset_map[name] = offset + + return _offset_map[name] + + +cpdef to_offset(freq): + """ + Return DateOffset object from string or tuple representation + or datetime.timedelta object. 
+ + Parameters + ---------- + freq : str, tuple, datetime.timedelta, DateOffset or None + + Returns + ------- + DateOffset or None + + Raises + ------ + ValueError + If freq is an invalid frequency + + See Also + -------- + DateOffset : Standard kind of date increment used for a date range. + + Examples + -------- + >>> to_offset("5min") + <5 * Minutes> + + >>> to_offset("1D1H") + <25 * Hours> + + >>> to_offset(("W", 2)) + <2 * Weeks: weekday=6> + + >>> to_offset((2, "B")) + <2 * BusinessDays> + + >>> to_offset(pd.Timedelta(days=1)) + <Day> + + >>> to_offset(Hour()) + <Hour> + """ + if freq is None: + return None + + if isinstance(freq, BaseOffset): + return freq + + if isinstance(freq, tuple): + name = freq[0] + stride = freq[1] + if isinstance(stride, str): + name, stride = stride, name + name, _ = base_and_stride(name) + delta = _get_offset(name) * stride + + elif isinstance(freq, timedelta): + from .timedeltas import Timedelta + + delta = None + freq = Timedelta(freq) + try: + for name in freq.components._fields: + offset = _name_to_offset_map[name] + stride = getattr(freq.components, name) + if stride != 0: + offset = stride * offset + if delta is None: + delta = offset + else: + delta = delta + offset + except ValueError as err: + raise ValueError(INVALID_FREQ_ERR_MSG.format(freq)) from err + + else: + delta = None + stride_sign = None + try: + split = re.split(opattern, freq) + if split[-1] != "" and not split[-1].isspace(): + # the last element must be blank + raise ValueError("last element must be blank") + for sep, stride, name in zip(split[0::4], split[1::4], split[2::4]): + if sep != "" and not sep.isspace(): + raise ValueError("separator must be spaces") + prefix = _lite_rule_alias.get(name) or name + if stride_sign is None: + stride_sign = -1 if stride.startswith("-") else 1 + if not stride: + stride = 1 + + from .resolution import Resolution # TODO: avoid runtime import + + if prefix in Resolution.reso_str_bump_map: + stride, name = 
Resolution.get_stride_from_decimal( + float(stride), prefix + ) + stride = int(stride) + offset = _get_offset(name) + offset = offset * int(np.fabs(stride) * stride_sign) + if delta is None: + delta = offset + else: + delta = delta + offset + except (ValueError, TypeError) as err: + raise ValueError(INVALID_FREQ_ERR_MSG.format(freq)) from err + + if delta is None: + raise ValueError(INVALID_FREQ_ERR_MSG.format(freq)) + + return delta + # ---------------------------------------------------------------------- # RelativeDelta Arithmetic diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 350bf8c38e6bf..86cc7ff753660 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -17,7 +17,7 @@ get_freq_str, ) import pandas._libs.tslibs.offsets as liboffsets -from pandas._libs.tslibs.offsets import ApplyTypeError +from pandas._libs.tslibs.offsets import ApplyTypeError, _get_offset, _offset_map import pandas.compat as compat from pandas.compat.numpy import np_datetime64_compat from pandas.errors import PerformanceWarning @@ -27,7 +27,6 @@ from pandas.core.series import Series from pandas.io.pickle import read_pickle -from pandas.tseries.frequencies import _get_offset, _offset_map from pandas.tseries.holiday import USFederalHolidayCalendar import pandas.tseries.offsets as offsets from pandas.tseries.offsets import ( diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 084ad4294f9d0..47ae66ac4f91b 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -1,29 +1,21 @@ -from datetime import timedelta -import re -from typing import Dict, Optional +from typing import Optional import warnings import numpy as np from pandas._libs.algos import unique_deltas -from pandas._libs.tslibs import Timedelta, Timestamp +from pandas._libs.tslibs import Timestamp from pandas._libs.tslibs.ccalendar import MONTH_ALIASES, 
MONTH_NUMBERS, int_to_weekday from pandas._libs.tslibs.fields import build_field_sarray -import pandas._libs.tslibs.frequencies as libfreqs -from pandas._libs.tslibs.offsets import ( +from pandas._libs.tslibs.offsets import ( # noqa:F401 DateOffset, Day, - Hour, - Micro, - Milli, - Minute, - Nano, - Second, + _get_offset, _offset_to_period_map, - prefix_mapping, + to_offset, ) from pandas._libs.tslibs.parsing import get_rule_month -from pandas._libs.tslibs.resolution import Resolution, month_position_check +from pandas._libs.tslibs.resolution import month_position_check from pandas._libs.tslibs.timezones import UTC from pandas._libs.tslibs.tzconversion import tz_convert from pandas.util._decorators import cache_readonly @@ -47,9 +39,6 @@ # --------------------------------------------------------------------- # Offset names ("time rules") and related functions -#: cache of previously seen offsets -_offset_map: Dict[str, DateOffset] = {} - def get_period_alias(offset_str: str) -> Optional[str]: """ @@ -58,126 +47,6 @@ def get_period_alias(offset_str: str) -> Optional[str]: return _offset_to_period_map.get(offset_str, None) -_name_to_offset_map = { - "days": Day(1), - "hours": Hour(1), - "minutes": Minute(1), - "seconds": Second(1), - "milliseconds": Milli(1), - "microseconds": Micro(1), - "nanoseconds": Nano(1), -} - - -def to_offset(freq) -> Optional[DateOffset]: - """ - Return DateOffset object from string or tuple representation - or datetime.timedelta object. - - Parameters - ---------- - freq : str, tuple, datetime.timedelta, DateOffset or None - - Returns - ------- - DateOffset - None if freq is None. - - Raises - ------ - ValueError - If freq is an invalid frequency - - See Also - -------- - DateOffset : Standard kind of date increment used for a date range. 
- - Examples - -------- - >>> to_offset("5min") - <5 * Minutes> - - >>> to_offset("1D1H") - <25 * Hours> - - >>> to_offset(("W", 2)) - <2 * Weeks: weekday=6> - - >>> to_offset((2, "B")) - <2 * BusinessDays> - - >>> to_offset(pd.Timedelta(days=1)) - <Day> - - >>> to_offset(Hour()) - <Hour> - """ - if freq is None: - return None - - if isinstance(freq, DateOffset): - return freq - - if isinstance(freq, tuple): - name = freq[0] - stride = freq[1] - if isinstance(stride, str): - name, stride = stride, name - name, _ = libfreqs.base_and_stride(name) - delta = _get_offset(name) * stride - - elif isinstance(freq, timedelta): - delta = None - freq = Timedelta(freq) - try: - for name in freq.components._fields: - offset = _name_to_offset_map[name] - stride = getattr(freq.components, name) - if stride != 0: - offset = stride * offset - if delta is None: - delta = offset - else: - delta = delta + offset - except ValueError as err: - raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) from err - - else: - delta = None - stride_sign = None - try: - split = re.split(libfreqs.opattern, freq) - if split[-1] != "" and not split[-1].isspace(): - # the last element must be blank - raise ValueError("last element must be blank") - for sep, stride, name in zip(split[0::4], split[1::4], split[2::4]): - if sep != "" and not sep.isspace(): - raise ValueError("separator must be spaces") - prefix = libfreqs._lite_rule_alias.get(name) or name - if stride_sign is None: - stride_sign = -1 if stride.startswith("-") else 1 - if not stride: - stride = 1 - if prefix in Resolution.reso_str_bump_map: - stride, name = Resolution.get_stride_from_decimal( - float(stride), prefix - ) - stride = int(stride) - offset = _get_offset(name) - offset = offset * int(np.fabs(stride) * stride_sign) - if delta is None: - delta = offset - else: - delta = delta + offset - except (ValueError, TypeError) as err: - raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) from err - - if delta is None: - 
raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) - - return delta - - def get_offset(name: str) -> DateOffset: """ Return DateOffset object associated with rule name. @@ -197,37 +66,6 @@ def get_offset(name: str) -> DateOffset: return _get_offset(name) -def _get_offset(name: str) -> DateOffset: - """ - Return DateOffset object associated with rule name. - - Examples - -------- - _get_offset('EOM') --> BMonthEnd(1) - """ - if name not in libfreqs._dont_uppercase: - name = name.upper() - name = libfreqs._lite_rule_alias.get(name, name) - name = libfreqs._lite_rule_alias.get(name.lower(), name) - else: - name = libfreqs._lite_rule_alias.get(name, name) - - if name not in _offset_map: - try: - split = name.split("-") - klass = prefix_mapping[split[0]] - # handles case where there's no suffix (and will TypeError if too - # many '-') - offset = klass._from_name(*split[1:]) - except (ValueError, TypeError, KeyError) as err: - # bad prefix or suffix - raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name)) from err - # cache - _offset_map[name] = offset - - return _offset_map[name] - - # --------------------------------------------------------------------- # Period codes
https://api.github.com/repos/pandas-dev/pandas/pulls/34420
2020-05-28T02:16:20Z
2020-05-28T17:11:55Z
2020-05-28T17:11:55Z
2020-05-28T17:20:37Z
REF: make remaining offset classes cdef
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 8e5634253bd39..37d9f48de5146 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -3347,7 +3347,7 @@ cdef class CustomBusinessDay(BusinessDay): return np.is_busday(day64, busdaycal=self.calendar) -class CustomBusinessHour(BusinessHour): +cdef class CustomBusinessHour(BusinessHour): """ DateOffset subclass representing possibly n custom business days. """ @@ -3389,7 +3389,7 @@ class CustomBusinessHour(BusinessHour): ) -class _CustomBusinessMonth(BusinessMixin, MonthOffset): +cdef class _CustomBusinessMonth(BusinessMixin): """ DateOffset subclass representing custom business month(s). @@ -3416,9 +3416,6 @@ class _CustomBusinessMonth(BusinessMixin, MonthOffset): ["n", "normalize", "weekmask", "holidays", "calendar", "offset"] ) - is_on_offset = BaseOffset.is_on_offset # override MonthOffset method - apply_index = BaseOffset.apply_index # override MonthOffset method - def __init__( self, n=1, @@ -3490,11 +3487,11 @@ class _CustomBusinessMonth(BusinessMixin, MonthOffset): return result -class CustomBusinessMonthEnd(_CustomBusinessMonth): +cdef class CustomBusinessMonthEnd(_CustomBusinessMonth): _prefix = "CBM" -class CustomBusinessMonthBegin(_CustomBusinessMonth): +cdef class CustomBusinessMonthBegin(_CustomBusinessMonth): _prefix = "CBMS"
After this its just down to 1) a pass to clean up pickle-based kludges that were needed when these were mixed python/cython 2) privatize things in liboffsets no longer needed externally 3) move to_offset int liboffsets
https://api.github.com/repos/pandas-dev/pandas/pulls/34419
2020-05-28T00:20:04Z
2020-05-28T17:13:18Z
2020-05-28T17:13:18Z
2020-05-28T17:21:51Z
DEPR: deprecate non keyword arguments in read_excel
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 20e2cce1a3dfa..a014729e86a7e 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -588,6 +588,10 @@ Deprecations version 1.1. All other arguments should be given as keyword arguments (:issue:`27573`). +- Passing any arguments but the first 2 to :func:`read_excel` as + positional arguments is deprecated since version 1.1. All other + arguments should be given as keyword arguments (:issue:`27573`). + - :func:`pandas.api.types.is_categorical` is deprecated and will be removed in a future version; use `:func:pandas.api.types.is_categorical_dtype` instead (:issue:`33385`) - :meth:`Index.get_value` is deprecated and will be removed in a future version (:issue:`19728`) - :meth:`DateOffset.__call__` is deprecated and will be removed in a future version, use ``offset + other`` instead (:issue:`34171`) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index d1139f640cef4..d55bdffe689f2 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -8,7 +8,7 @@ from pandas._libs.parsers import STR_NA_VALUES from pandas.errors import EmptyDataError -from pandas.util._decorators import Appender +from pandas.util._decorators import Appender, deprecate_nonkeyword_arguments from pandas.core.dtypes.common import is_bool, is_float, is_integer, is_list_like @@ -266,6 +266,7 @@ ) +@deprecate_nonkeyword_arguments(allowed_args=2, version="2.0") @Appender(_read_excel_doc) def read_excel( io, diff --git a/pandas/tests/io/excel/test_odf.py b/pandas/tests/io/excel/test_odf.py index b9a3e8b59b133..d6c6399f082c6 100644 --- a/pandas/tests/io/excel/test_odf.py +++ b/pandas/tests/io/excel/test_odf.py @@ -33,7 +33,7 @@ def test_read_writer_table(): columns=["Column 1", "Unnamed: 2", "Column 3"], ) - result = pd.read_excel("writertable.odt", "Table1", index_col=0) + result = pd.read_excel("writertable.odt", sheet_name="Table1", index_col=0) 
tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 99447c03e89af..fd1533dd65dc4 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -1,4 +1,3 @@ -from collections import OrderedDict import contextlib from datetime import datetime, time from functools import partial @@ -136,13 +135,19 @@ def test_usecols_int(self, read_ext, df_ref): msg = "Passing an integer for `usecols`" with pytest.raises(ValueError, match=msg): with ignore_xlrd_time_clock_warning(): - pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols=3) + pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=3 + ) # usecols as int with pytest.raises(ValueError, match=msg): with ignore_xlrd_time_clock_warning(): pd.read_excel( - "test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=3 + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols=3, ) def test_usecols_list(self, read_ext, df_ref): @@ -151,10 +156,14 @@ def test_usecols_list(self, read_ext, df_ref): df_ref = df_ref.reindex(columns=["B", "C"]) df1 = pd.read_excel( - "test1" + read_ext, "Sheet1", index_col=0, usecols=[0, 2, 3] + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=[0, 2, 3] ) df2 = pd.read_excel( - "test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=[0, 2, 3] + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols=[0, 2, 3], ) # TODO add index to xls file) @@ -166,9 +175,15 @@ def test_usecols_str(self, read_ext, df_ref): pytest.xfail("Sheets containing datetimes not supported by pyxlsb") df1 = df_ref.reindex(columns=["A", "B", "C"]) - df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A:D") + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A:D" + ) df3 = pd.read_excel( - "test1" + read_ext, "Sheet2", skiprows=[1], 
index_col=0, usecols="A:D" + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A:D", ) # TODO add index to xls, read xls ignores index name ? @@ -176,18 +191,30 @@ def test_usecols_str(self, read_ext, df_ref): tm.assert_frame_equal(df3, df1, check_names=False) df1 = df_ref.reindex(columns=["B", "C"]) - df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C,D") + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C,D" + ) df3 = pd.read_excel( - "test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C,D" + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A,C,D", ) # TODO add index to xls file tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) df1 = df_ref.reindex(columns=["B", "C"]) - df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C:D") + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C:D" + ) df3 = pd.read_excel( - "test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C:D" + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A,C:D", ) tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) @@ -201,7 +228,7 @@ def test_usecols_diff_positional_int_columns_order(self, read_ext, usecols, df_r expected = df_ref[["A", "C"]] result = pd.read_excel( - "test1" + read_ext, "Sheet1", index_col=0, usecols=usecols + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=usecols ) tm.assert_frame_equal(result, expected, check_names=False) @@ -210,7 +237,7 @@ def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_r expected = df_ref[["B", "D"]] expected.index = range(len(expected)) - result = pd.read_excel("test1" + read_ext, "Sheet1", usecols=usecols) + result = pd.read_excel("test1" + read_ext, 
sheet_name="Sheet1", usecols=usecols) tm.assert_frame_equal(result, expected, check_names=False) def test_read_excel_without_slicing(self, read_ext, df_ref): @@ -218,7 +245,7 @@ def test_read_excel_without_slicing(self, read_ext, df_ref): pytest.xfail("Sheets containing datetimes not supported by pyxlsb") expected = df_ref - result = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0) + result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0) tm.assert_frame_equal(result, expected, check_names=False) def test_usecols_excel_range_str(self, read_ext, df_ref): @@ -227,7 +254,7 @@ def test_usecols_excel_range_str(self, read_ext, df_ref): expected = df_ref[["C", "D"]] result = pd.read_excel( - "test1" + read_ext, "Sheet1", index_col=0, usecols="A,D:E" + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,D:E" ) tm.assert_frame_equal(result, expected, check_names=False) @@ -235,19 +262,24 @@ def test_usecols_excel_range_str_invalid(self, read_ext): msg = "Invalid column name: E1" with pytest.raises(ValueError, match=msg): - pd.read_excel("test1" + read_ext, "Sheet1", usecols="D:E1") + pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols="D:E1") def test_index_col_label_error(self, read_ext): msg = "list indices must be integers.*, not str" with pytest.raises(TypeError, match=msg): pd.read_excel( - "test1" + read_ext, "Sheet1", index_col=["A"], usecols=["A", "C"] + "test1" + read_ext, + sheet_name="Sheet1", + index_col=["A"], + usecols=["A", "C"], ) def test_index_col_empty(self, read_ext): # see gh-9208 - result = pd.read_excel("test1" + read_ext, "Sheet3", index_col=["A", "B", "C"]) + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet3", index_col=["A", "B", "C"] + ) expected = DataFrame( columns=["D", "E", "F"], index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]), @@ -257,7 +289,9 @@ def test_index_col_empty(self, read_ext): @pytest.mark.parametrize("index_col", [None, 2]) def 
test_index_col_with_unnamed(self, read_ext, index_col): # see gh-18792 - result = pd.read_excel("test1" + read_ext, "Sheet4", index_col=index_col) + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet4", index_col=index_col + ) expected = DataFrame( [["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"] ) @@ -286,7 +320,7 @@ def test_usecols_wrong_type(self, read_ext): def test_excel_stop_iterator(self, read_ext): - parsed = pd.read_excel("test2" + read_ext, "Sheet1") + parsed = pd.read_excel("test2" + read_ext, sheet_name="Sheet1") expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"]) tm.assert_frame_equal(parsed, expected) @@ -294,7 +328,7 @@ def test_excel_cell_error_na(self, read_ext): if pd.read_excel.keywords["engine"] == "pyxlsb": pytest.xfail("Sheets containing datetimes not supported by pyxlsb") - parsed = pd.read_excel("test3" + read_ext, "Sheet1") + parsed = pd.read_excel("test3" + read_ext, sheet_name="Sheet1") expected = DataFrame([[np.nan]], columns=["Test"]) tm.assert_frame_equal(parsed, expected) @@ -302,13 +336,17 @@ def test_excel_table(self, read_ext, df_ref): if pd.read_excel.keywords["engine"] == "pyxlsb": pytest.xfail("Sheets containing datetimes not supported by pyxlsb") - df1 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0) - df2 = pd.read_excel("test1" + read_ext, "Sheet2", skiprows=[1], index_col=0) + df1 = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0) + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet2", skiprows=[1], index_col=0 + ) # TODO add index to file tm.assert_frame_equal(df1, df_ref, check_names=False) tm.assert_frame_equal(df2, df_ref, check_names=False) - df3 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, skipfooter=1) + df3 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, skipfooter=1 + ) tm.assert_frame_equal(df3, df1.iloc[:-1]) def test_reader_special_dtypes(self, read_ext): @@ -316,50 +354,49 @@ 
def test_reader_special_dtypes(self, read_ext): pytest.xfail("Sheets containing datetimes not supported by pyxlsb") expected = DataFrame.from_dict( - OrderedDict( - [ - ("IntCol", [1, 2, -3, 4, 0]), - ("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]), - ("BoolCol", [True, False, True, True, False]), - ("StrCol", [1, 2, 3, 4, 5]), - # GH5394 - this is why convert_float isn't vectorized - ("Str2Col", ["a", 3, "c", "d", "e"]), - ( - "DateCol", - [ - datetime(2013, 10, 30), - datetime(2013, 10, 31), - datetime(1905, 1, 1), - datetime(2013, 12, 14), - datetime(2015, 3, 14), - ], - ), - ] - ) + { + "IntCol": [1, 2, -3, 4, 0], + "FloatCol": [1.25, 2.25, 1.83, 1.92, 0.0000000005], + "BoolCol": [True, False, True, True, False], + "StrCol": [1, 2, 3, 4, 5], + # GH5394 - this is why convert_float isn't vectorized + "Str2Col": ["a", 3, "c", "d", "e"], + "DateCol": [ + datetime(2013, 10, 30), + datetime(2013, 10, 31), + datetime(1905, 1, 1), + datetime(2013, 12, 14), + datetime(2015, 3, 14), + ], + }, ) basename = "test_types" # should read in correctly and infer types - actual = pd.read_excel(basename + read_ext, "Sheet1") + actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1") tm.assert_frame_equal(actual, expected) # if not coercing number, then int comes in as float float_expected = expected.copy() float_expected["IntCol"] = float_expected["IntCol"].astype(float) float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0 - actual = pd.read_excel(basename + read_ext, "Sheet1", convert_float=False) + actual = pd.read_excel( + basename + read_ext, sheet_name="Sheet1", convert_float=False + ) tm.assert_frame_equal(actual, float_expected) # check setting Index (assuming xls and xlsx are the same here) for icol, name in enumerate(expected.columns): - actual = pd.read_excel(basename + read_ext, "Sheet1", index_col=icol) + actual = pd.read_excel( + basename + read_ext, sheet_name="Sheet1", index_col=icol + ) exp = expected.set_index(name) 
tm.assert_frame_equal(actual, exp) # convert_float and converters should be different but both accepted expected["StrCol"] = expected["StrCol"].apply(str) actual = pd.read_excel( - basename + read_ext, "Sheet1", converters={"StrCol": str} + basename + read_ext, sheet_name="Sheet1", converters={"StrCol": str} ) tm.assert_frame_equal(actual, expected) @@ -367,7 +404,7 @@ def test_reader_special_dtypes(self, read_ext): no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str) actual = pd.read_excel( basename + read_ext, - "Sheet1", + sheet_name="Sheet1", convert_float=False, converters={"StrCol": str}, ) @@ -379,14 +416,12 @@ def test_reader_converters(self, read_ext): basename = "test_converters" expected = DataFrame.from_dict( - OrderedDict( - [ - ("IntCol", [1, 2, -3, -1000, 0]), - ("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]), - ("BoolCol", ["Found", "Found", "Found", "Not found", "Found"]), - ("StrCol", ["1", np.nan, "3", "4", "5"]), - ] - ) + { + "IntCol": [1, 2, -3, -1000, 0], + "FloatCol": [12.5, np.nan, 18.3, 19.2, 0.000000005], + "BoolCol": ["Found", "Found", "Found", "Not found", "Found"], + "StrCol": ["1", np.nan, "3", "4", "5"], + } ) converters = { @@ -398,7 +433,9 @@ def test_reader_converters(self, read_ext): # should read in correctly and set types of single cells (not array # dtypes) - actual = pd.read_excel(basename + read_ext, "Sheet1", converters=converters) + actual = pd.read_excel( + basename + read_ext, sheet_name="Sheet1", converters=converters + ) tm.assert_frame_equal(actual, expected) def test_reader_dtype(self, read_ext): @@ -483,7 +520,7 @@ def test_reader_spaces(self, read_ext): tm.assert_frame_equal(actual, expected) def test_reading_all_sheets(self, read_ext): - # Test reading all sheetnames by setting sheetname to None, + # Test reading all sheet names by setting sheet_name to None, # Ensure a dict is returned. 
# See PR #9450 basename = "test_multisheet" @@ -496,7 +533,7 @@ def test_reading_all_sheets(self, read_ext): assert expected_keys == list(dfs.keys()) def test_reading_multiple_specific_sheets(self, read_ext): - # Test reading specific sheetnames by specifying a mixed list + # Test reading specific sheet names by specifying a mixed list # of integers and strings, and confirm that duplicated sheet # references (positions/names) are removed properly. # Ensure a dict is returned @@ -510,7 +547,7 @@ def test_reading_multiple_specific_sheets(self, read_ext): assert len(expected_keys) == len(dfs.keys()) def test_reading_all_sheets_with_blank(self, read_ext): - # Test reading all sheetnames by setting sheetname to None, + # Test reading all sheet names by setting sheet_name to None, # In the case where some sheets are blank. # Issue #11711 basename = "blank_with_header" @@ -520,12 +557,12 @@ def test_reading_all_sheets_with_blank(self, read_ext): # GH6403 def test_read_excel_blank(self, read_ext): - actual = pd.read_excel("blank" + read_ext, "Sheet1") + actual = pd.read_excel("blank" + read_ext, sheet_name="Sheet1") tm.assert_frame_equal(actual, DataFrame()) def test_read_excel_blank_with_header(self, read_ext): expected = DataFrame(columns=["col_1", "col_2"]) - actual = pd.read_excel("blank_with_header" + read_ext, "Sheet1") + actual = pd.read_excel("blank_with_header" + read_ext, sheet_name="Sheet1") tm.assert_frame_equal(actual, expected) def test_date_conversion_overflow(self, read_ext): @@ -569,9 +606,9 @@ def test_sheet_name(self, read_ext, df_ref): def test_excel_read_buffer(self, read_ext): pth = "test1" + read_ext - expected = pd.read_excel(pth, "Sheet1", index_col=0) + expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0) with open(pth, "rb") as f: - actual = pd.read_excel(f, "Sheet1", index_col=0) + actual = pd.read_excel(f, sheet_name="Sheet1", index_col=0) tm.assert_frame_equal(expected, actual) def test_bad_engine_raises(self, read_ext): @@ -626,10 
+663,10 @@ def test_read_from_pathlib_path(self, read_ext): from pathlib import Path str_path = "test1" + read_ext - expected = pd.read_excel(str_path, "Sheet1", index_col=0) + expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0) path_obj = Path("test1" + read_ext) - actual = pd.read_excel(path_obj, "Sheet1", index_col=0) + actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0) tm.assert_frame_equal(expected, actual) @@ -641,10 +678,10 @@ def test_read_from_py_localpath(self, read_ext): from py.path import local as LocalPath str_path = os.path.join("test1" + read_ext) - expected = pd.read_excel(str_path, "Sheet1", index_col=0) + expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0) path_obj = LocalPath().join("test1" + read_ext) - actual = pd.read_excel(path_obj, "Sheet1", index_col=0) + actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0) tm.assert_frame_equal(expected, actual) @@ -654,7 +691,7 @@ def test_close_from_py_localpath(self, read_ext): # GH31467 str_path = os.path.join("test1" + read_ext) with open(str_path, "rb") as f: - x = pd.read_excel(f, "Sheet1", index_col=0) + x = pd.read_excel(f, sheet_name="Sheet1", index_col=0) del x # should not throw an exception because the passed file was closed f.read() @@ -682,10 +719,10 @@ def test_reader_seconds(self, read_ext): } ) - actual = pd.read_excel("times_1900" + read_ext, "Sheet1") + actual = pd.read_excel("times_1900" + read_ext, sheet_name="Sheet1") tm.assert_frame_equal(actual, expected) - actual = pd.read_excel("times_1904" + read_ext, "Sheet1") + actual = pd.read_excel("times_1904" + read_ext, sheet_name="Sheet1") tm.assert_frame_equal(actual, expected) def test_read_excel_multiindex(self, read_ext): @@ -707,52 +744,66 @@ def test_read_excel_multiindex(self, read_ext): columns=mi, ) - actual = pd.read_excel(mi_file, "mi_column", header=[0, 1], index_col=0) + actual = pd.read_excel( + mi_file, sheet_name="mi_column", header=[0, 1], index_col=0 + ) 
tm.assert_frame_equal(actual, expected) # "mi_index" sheet expected.index = mi expected.columns = ["a", "b", "c", "d"] - actual = pd.read_excel(mi_file, "mi_index", index_col=[0, 1]) + actual = pd.read_excel(mi_file, sheet_name="mi_index", index_col=[0, 1]) tm.assert_frame_equal(actual, expected, check_names=False) # "both" sheet expected.columns = mi - actual = pd.read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1]) + actual = pd.read_excel( + mi_file, sheet_name="both", index_col=[0, 1], header=[0, 1] + ) tm.assert_frame_equal(actual, expected, check_names=False) # "mi_index_name" sheet expected.columns = ["a", "b", "c", "d"] expected.index = mi.set_names(["ilvl1", "ilvl2"]) - actual = pd.read_excel(mi_file, "mi_index_name", index_col=[0, 1]) + actual = pd.read_excel(mi_file, sheet_name="mi_index_name", index_col=[0, 1]) tm.assert_frame_equal(actual, expected) # "mi_column_name" sheet expected.index = list(range(4)) expected.columns = mi.set_names(["c1", "c2"]) - actual = pd.read_excel(mi_file, "mi_column_name", header=[0, 1], index_col=0) + actual = pd.read_excel( + mi_file, sheet_name="mi_column_name", header=[0, 1], index_col=0 + ) tm.assert_frame_equal(actual, expected) # see gh-11317 # "name_with_int" sheet expected.columns = mi.set_levels([1, 2], level=1).set_names(["c1", "c2"]) - actual = pd.read_excel(mi_file, "name_with_int", index_col=0, header=[0, 1]) + actual = pd.read_excel( + mi_file, sheet_name="name_with_int", index_col=0, header=[0, 1] + ) tm.assert_frame_equal(actual, expected) # "both_name" sheet expected.columns = mi.set_names(["c1", "c2"]) expected.index = mi.set_names(["ilvl1", "ilvl2"]) - actual = pd.read_excel(mi_file, "both_name", index_col=[0, 1], header=[0, 1]) + actual = pd.read_excel( + mi_file, sheet_name="both_name", index_col=[0, 1], header=[0, 1] + ) tm.assert_frame_equal(actual, expected) # "both_skiprows" sheet actual = pd.read_excel( - mi_file, "both_name_skiprows", index_col=[0, 1], header=[0, 1], skiprows=2 + mi_file, 
+ sheet_name="both_name_skiprows", + index_col=[0, 1], + header=[0, 1], + skiprows=2, ) tm.assert_frame_equal(actual, expected) @@ -761,7 +812,7 @@ def test_read_excel_multiindex_header_only(self, read_ext): # # Don't try to parse a header name if there isn't one. mi_file = "testmultiindex" + read_ext - result = pd.read_excel(mi_file, "index_col_none", header=[0, 1]) + result = pd.read_excel(mi_file, sheet_name="index_col_none", header=[0, 1]) exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")]) expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns) @@ -799,12 +850,12 @@ def test_excel_old_index_format(self, read_ext): expected = pd.DataFrame(data, index=si, columns=columns) - actual = pd.read_excel(filename, "single_names", index_col=0) + actual = pd.read_excel(filename, sheet_name="single_names", index_col=0) tm.assert_frame_equal(actual, expected) expected.index = mi - actual = pd.read_excel(filename, "multi_names", index_col=[0, 1]) + actual = pd.read_excel(filename, sheet_name="multi_names", index_col=[0, 1]) tm.assert_frame_equal(actual, expected) # The analogous versions of the "names" version data @@ -831,12 +882,12 @@ def test_excel_old_index_format(self, read_ext): expected = pd.DataFrame(data, index=si, columns=columns) - actual = pd.read_excel(filename, "single_no_names", index_col=0) + actual = pd.read_excel(filename, sheet_name="single_no_names", index_col=0) tm.assert_frame_equal(actual, expected) expected.index = mi - actual = pd.read_excel(filename, "multi_no_names", index_col=[0, 1]) + actual = pd.read_excel(filename, sheet_name="multi_no_names", index_col=[0, 1]) tm.assert_frame_equal(actual, expected, check_names=False) def test_read_excel_bool_header_arg(self, read_ext): @@ -858,7 +909,7 @@ def test_read_excel_skiprows_list(self, read_ext): pytest.xfail("Sheets containing datetimes not supported by pyxlsb") actual = pd.read_excel( - "testskiprows" + read_ext, "skiprows_list", skiprows=[0, 2] + "testskiprows" + read_ext, 
sheet_name="skiprows_list", skiprows=[0, 2] ) expected = DataFrame( [ @@ -872,7 +923,9 @@ def test_read_excel_skiprows_list(self, read_ext): tm.assert_frame_equal(actual, expected) actual = pd.read_excel( - "testskiprows" + read_ext, "skiprows_list", skiprows=np.array([0, 2]) + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=np.array([0, 2]), ) tm.assert_frame_equal(actual, expected) @@ -902,19 +955,25 @@ def test_read_excel_squeeze(self, read_ext): # GH 12157 f = "test_squeeze" + read_ext - actual = pd.read_excel(f, "two_columns", index_col=0, squeeze=True) + actual = pd.read_excel(f, sheet_name="two_columns", index_col=0, squeeze=True) expected = pd.Series([2, 3, 4], [4, 5, 6], name="b") expected.index.name = "a" tm.assert_series_equal(actual, expected) - actual = pd.read_excel(f, "two_columns", squeeze=True) + actual = pd.read_excel(f, sheet_name="two_columns", squeeze=True) expected = pd.DataFrame({"a": [4, 5, 6], "b": [2, 3, 4]}) tm.assert_frame_equal(actual, expected) - actual = pd.read_excel(f, "one_column", squeeze=True) + actual = pd.read_excel(f, sheet_name="one_column", squeeze=True) expected = pd.Series([1, 2, 3], name="a") tm.assert_series_equal(actual, expected) + def test_deprecated_kwargs(self, read_ext): + with tm.assert_produces_warning(FutureWarning, raise_on_extra_warnings=False): + pd.read_excel("test1" + read_ext, "Sheet1", 0) + + pd.read_excel("test1" + read_ext) + class TestExcelFileRead: @pytest.fixture(autouse=True) @@ -929,7 +988,7 @@ def cd_and_set_engine(self, engine, datapath, monkeypatch): def test_excel_passes_na(self, read_ext): with pd.ExcelFile("test4" + read_ext) as excel: parsed = pd.read_excel( - excel, "Sheet1", keep_default_na=False, na_values=["apple"] + excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"] ) expected = DataFrame( [["NA"], [1], ["NA"], [np.nan], ["rabbit"]], columns=["Test"] @@ -938,7 +997,7 @@ def test_excel_passes_na(self, read_ext): with pd.ExcelFile("test4" + 
read_ext) as excel: parsed = pd.read_excel( - excel, "Sheet1", keep_default_na=True, na_values=["apple"] + excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"] ) expected = DataFrame( [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"] @@ -948,7 +1007,7 @@ def test_excel_passes_na(self, read_ext): # 13967 with pd.ExcelFile("test5" + read_ext) as excel: parsed = pd.read_excel( - excel, "Sheet1", keep_default_na=False, na_values=["apple"] + excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"] ) expected = DataFrame( [["1.#QNAN"], [1], ["nan"], [np.nan], ["rabbit"]], columns=["Test"] @@ -957,7 +1016,7 @@ def test_excel_passes_na(self, read_ext): with pd.ExcelFile("test5" + read_ext) as excel: parsed = pd.read_excel( - excel, "Sheet1", keep_default_na=True, na_values=["apple"] + excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"] ) expected = DataFrame( [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"] @@ -974,7 +1033,11 @@ def test_excel_passes_na_filter(self, read_ext, na_filter): with pd.ExcelFile("test5" + read_ext) as excel: parsed = pd.read_excel( - excel, "Sheet1", keep_default_na=True, na_values=["apple"], **kwargs + excel, + sheet_name="Sheet1", + keep_default_na=True, + na_values=["apple"], + **kwargs, ) if na_filter is False: @@ -1003,8 +1066,8 @@ def test_excel_table_sheet_by_index(self, read_ext, df_ref): pytest.xfail("Sheets containing datetimes not supported by pyxlsb") with pd.ExcelFile("test1" + read_ext) as excel: - df1 = pd.read_excel(excel, 0, index_col=0) - df2 = pd.read_excel(excel, 1, skiprows=[1], index_col=0) + df1 = pd.read_excel(excel, sheet_name=0, index_col=0) + df2 = pd.read_excel(excel, sheet_name=1, skiprows=[1], index_col=0) tm.assert_frame_equal(df1, df_ref, check_names=False) tm.assert_frame_equal(df2, df_ref, check_names=False) @@ -1015,7 +1078,7 @@ def test_excel_table_sheet_by_index(self, read_ext, df_ref): tm.assert_frame_equal(df2, df_ref, 
check_names=False) with pd.ExcelFile("test1" + read_ext) as excel: - df3 = pd.read_excel(excel, 0, index_col=0, skipfooter=1) + df3 = pd.read_excel(excel, sheet_name=0, index_col=0, skipfooter=1) tm.assert_frame_equal(df3, df1.iloc[:-1]) with pd.ExcelFile("test1" + read_ext) as excel: @@ -1043,11 +1106,11 @@ def test_sheet_name(self, read_ext, df_ref): def test_excel_read_buffer(self, engine, read_ext): pth = "test1" + read_ext - expected = pd.read_excel(pth, "Sheet1", index_col=0, engine=engine) + expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0, engine=engine) with open(pth, "rb") as f: with pd.ExcelFile(f) as xls: - actual = pd.read_excel(xls, "Sheet1", index_col=0) + actual = pd.read_excel(xls, sheet_name="Sheet1", index_col=0) tm.assert_frame_equal(expected, actual) @@ -1055,7 +1118,7 @@ def test_reader_closes_file(self, engine, read_ext): with open("test1" + read_ext, "rb") as f: with pd.ExcelFile(f) as xlsx: # parses okay - pd.read_excel(xlsx, "Sheet1", index_col=0, engine=engine) + pd.read_excel(xlsx, sheet_name="Sheet1", index_col=0, engine=engine) assert f.closed diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index 1692e1a8a0dd3..65d22f2f943e5 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -64,7 +64,9 @@ def test_read_one_empty_col_no_header(self, ext, header, expected): with tm.ensure_clean(ext) as path: df.to_excel(path, filename, index=False, header=False) - result = pd.read_excel(path, filename, usecols=[0], header=header) + result = pd.read_excel( + path, sheet_name=filename, usecols=[0], header=header + ) tm.assert_frame_equal(result, expected) @@ -80,7 +82,9 @@ def test_read_one_empty_col_with_header(self, ext, header, expected): with tm.ensure_clean(ext) as path: df.to_excel(path, "with_header", index=False, header=True) - result = pd.read_excel(path, filename, usecols=[0], header=header) + result = pd.read_excel( + path, 
sheet_name=filename, usecols=[0], header=header + ) tm.assert_frame_equal(result, expected) @@ -100,10 +104,13 @@ def test_set_column_names_in_parameter(self, ext): with ExcelFile(pth) as reader: xlsdf_no_head = pd.read_excel( - reader, "Data_no_head", header=None, names=["A", "B"] + reader, sheet_name="Data_no_head", header=None, names=["A", "B"] ) xlsdf_with_head = pd.read_excel( - reader, "Data_with_head", index_col=None, names=["A", "B"] + reader, + sheet_name="Data_with_head", + index_col=None, + names=["A", "B"], ) tm.assert_frame_equal(xlsdf_no_head, refdf) @@ -326,13 +333,13 @@ def test_excel_sheet_by_name_raise(self, path): gt.to_excel(path) xl = ExcelFile(path) - df = pd.read_excel(xl, 0, index_col=0) + df = pd.read_excel(xl, sheet_name=0, index_col=0) tm.assert_frame_equal(gt, df) msg = "No sheet named <'0'>" with pytest.raises(xlrd.XLRDError, match=msg): - pd.read_excel(xl, "0") + pd.read_excel(xl, sheet_name="0") def test_excel_writer_context_manager(self, frame, path): with ExcelWriter(path) as writer: @@ -342,8 +349,8 @@ def test_excel_writer_context_manager(self, frame, path): frame2.to_excel(writer, "Data2") with ExcelFile(path) as reader: - found_df = pd.read_excel(reader, "Data1", index_col=0) - found_df2 = pd.read_excel(reader, "Data2", index_col=0) + found_df = pd.read_excel(reader, sheet_name="Data1", index_col=0) + found_df2 = pd.read_excel(reader, sheet_name="Data2", index_col=0) tm.assert_frame_equal(found_df, frame) tm.assert_frame_equal(found_df2, frame2) @@ -359,25 +366,27 @@ def test_roundtrip(self, frame, path): # test roundtrip frame.to_excel(path, "test1") - recons = pd.read_excel(path, "test1", index_col=0) + recons = pd.read_excel(path, sheet_name="test1", index_col=0) tm.assert_frame_equal(frame, recons) frame.to_excel(path, "test1", index=False) - recons = pd.read_excel(path, "test1", index_col=None) + recons = pd.read_excel(path, sheet_name="test1", index_col=None) recons.index = frame.index tm.assert_frame_equal(frame, recons) 
frame.to_excel(path, "test1", na_rep="NA") - recons = pd.read_excel(path, "test1", index_col=0, na_values=["NA"]) + recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["NA"]) tm.assert_frame_equal(frame, recons) # GH 3611 frame.to_excel(path, "test1", na_rep="88") - recons = pd.read_excel(path, "test1", index_col=0, na_values=["88"]) + recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["88"]) tm.assert_frame_equal(frame, recons) frame.to_excel(path, "test1", na_rep="88") - recons = pd.read_excel(path, "test1", index_col=0, na_values=[88, 88.0]) + recons = pd.read_excel( + path, sheet_name="test1", index_col=0, na_values=[88, 88.0] + ) tm.assert_frame_equal(frame, recons) # GH 6573 @@ -401,7 +410,7 @@ def test_mixed(self, frame, path): mixed_frame.to_excel(path, "test1") reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) tm.assert_frame_equal(mixed_frame, recons) def test_ts_frame(self, tsframe, path): @@ -414,7 +423,7 @@ def test_ts_frame(self, tsframe, path): df.to_excel(path, "test1") reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) tm.assert_frame_equal(df, recons) def test_basics_with_nan(self, frame, path): @@ -433,17 +442,19 @@ def test_int_types(self, np_type, path): df.to_excel(path, "test1") reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) int_frame = df.astype(np.int64) tm.assert_frame_equal(int_frame, recons) - recons2 = pd.read_excel(path, "test1", index_col=0) + recons2 = pd.read_excel(path, sheet_name="test1", index_col=0) tm.assert_frame_equal(int_frame, recons2) # Test with convert_float=False comes back as float. 
float_frame = df.astype(float) - recons = pd.read_excel(path, "test1", convert_float=False, index_col=0) + recons = pd.read_excel( + path, sheet_name="test1", convert_float=False, index_col=0 + ) tm.assert_frame_equal( recons, float_frame, check_index_type=False, check_column_type=False ) @@ -455,7 +466,7 @@ def test_float_types(self, np_type, path): df.to_excel(path, "test1") reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0).astype(np_type) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(np_type) tm.assert_frame_equal(df, recons) @@ -466,7 +477,7 @@ def test_bool_types(self, np_type, path): df.to_excel(path, "test1") reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0).astype(np_type) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(np_type) tm.assert_frame_equal(df, recons) @@ -475,7 +486,7 @@ def test_inf_roundtrip(self, path): df.to_excel(path, "test1") reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) tm.assert_frame_equal(df, recons) @@ -499,9 +510,9 @@ def test_sheets(self, frame, tsframe, path): tsframe.to_excel(writer, "test2") writer.save() reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) tm.assert_frame_equal(frame, recons) - recons = pd.read_excel(reader, "test2", index_col=0) + recons = pd.read_excel(reader, sheet_name="test2", index_col=0) tm.assert_frame_equal(tsframe, recons) assert 2 == len(reader.sheet_names) assert "test1" == reader.sheet_names[0] @@ -520,7 +531,7 @@ def test_colaliases(self, frame, path): col_aliases = Index(["AA", "X", "Y", "Z"]) frame.to_excel(path, "test1", header=col_aliases) reader = ExcelFile(path) - rs = pd.read_excel(reader, "test1", index_col=0) + rs = pd.read_excel(reader, sheet_name="test1", index_col=0) xp = frame.copy() 
xp.columns = col_aliases tm.assert_frame_equal(xp, rs) @@ -538,7 +549,7 @@ def test_roundtrip_indexlabels(self, merge_cells, frame, path): df = DataFrame(np.random.randn(10, 2)) >= 0 df.to_excel(path, "test1", index_label=["test"], merge_cells=merge_cells) reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0).astype(np.int64) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(np.int64) df.index.names = ["test"] assert df.index.names == recons.index.names @@ -550,14 +561,14 @@ def test_roundtrip_indexlabels(self, merge_cells, frame, path): merge_cells=merge_cells, ) reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0).astype(np.int64) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(np.int64) df.index.names = ["test"] assert df.index.names == recons.index.names df = DataFrame(np.random.randn(10, 2)) >= 0 df.to_excel(path, "test1", index_label="test", merge_cells=merge_cells) reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0).astype(np.int64) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(np.int64) df.index.names = ["test"] tm.assert_frame_equal(df, recons.astype(bool)) @@ -573,7 +584,7 @@ def test_roundtrip_indexlabels(self, merge_cells, frame, path): df = df.set_index(["A", "B"]) reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=[0, 1]) + recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) tm.assert_frame_equal(df, recons) def test_excel_roundtrip_indexname(self, merge_cells, path): @@ -583,7 +594,7 @@ def test_excel_roundtrip_indexname(self, merge_cells, path): df.to_excel(path, merge_cells=merge_cells) xf = ExcelFile(path) - result = pd.read_excel(xf, xf.sheet_names[0], index_col=0) + result = pd.read_excel(xf, sheet_name=xf.sheet_names[0], index_col=0) tm.assert_frame_equal(result, df) assert result.index.name == "foo" @@ -601,7 +612,7 @@ def 
test_excel_roundtrip_datetime(self, merge_cells, tsframe, path): tsf.to_excel(path, "test1", merge_cells=merge_cells) reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) tm.assert_frame_equal(tsframe, recons) @@ -643,8 +654,8 @@ def test_excel_date_datetime_format(self, engine, ext, path): reader1 = ExcelFile(path) reader2 = ExcelFile(filename2) - rs1 = pd.read_excel(reader1, "test1", index_col=0) - rs2 = pd.read_excel(reader2, "test1", index_col=0) + rs1 = pd.read_excel(reader1, sheet_name="test1", index_col=0) + rs2 = pd.read_excel(reader2, sheet_name="test1", index_col=0) tm.assert_frame_equal(rs1, rs2) @@ -665,7 +676,7 @@ def test_to_excel_interval_no_labels(self, path): df.to_excel(path, "test1") reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) tm.assert_frame_equal(expected, recons) def test_to_excel_interval_labels(self, path): @@ -683,7 +694,7 @@ def test_to_excel_interval_labels(self, path): df.to_excel(path, "test1") reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) tm.assert_frame_equal(expected, recons) def test_to_excel_timedelta(self, path): @@ -703,7 +714,7 @@ def test_to_excel_timedelta(self, path): df.to_excel(path, "test1") reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=0) + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) tm.assert_frame_equal(expected, recons) def test_to_excel_periodindex(self, tsframe, path): @@ -712,7 +723,7 @@ def test_to_excel_periodindex(self, tsframe, path): xp.to_excel(path, "sht1") reader = ExcelFile(path) - rs = pd.read_excel(reader, "sht1", index_col=0) + rs = pd.read_excel(reader, sheet_name="sht1", index_col=0) tm.assert_frame_equal(xp, rs.to_period("M")) def test_to_excel_multiindex(self, 
merge_cells, frame, path): @@ -726,7 +737,7 @@ def test_to_excel_multiindex(self, merge_cells, frame, path): # round trip frame.to_excel(path, "test1", merge_cells=merge_cells) reader = ExcelFile(path) - df = pd.read_excel(reader, "test1", index_col=[0, 1]) + df = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) tm.assert_frame_equal(frame, df) # GH13511 @@ -757,7 +768,7 @@ def test_to_excel_multiindex_cols(self, merge_cells, frame, path): # round trip frame.to_excel(path, "test1", merge_cells=merge_cells) reader = ExcelFile(path) - df = pd.read_excel(reader, "test1", header=header, index_col=[0, 1]) + df = pd.read_excel(reader, sheet_name="test1", header=header, index_col=[0, 1]) if not merge_cells: fm = frame.columns.format(sparsify=False, adjoin=False, names=False) frame.columns = [".".join(map(str, q)) for q in zip(*fm)] @@ -771,7 +782,7 @@ def test_to_excel_multiindex_dates(self, merge_cells, tsframe, path): tsframe.index.names = ["time", "foo"] tsframe.to_excel(path, "test1", merge_cells=merge_cells) reader = ExcelFile(path) - recons = pd.read_excel(reader, "test1", index_col=[0, 1]) + recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) tm.assert_frame_equal(tsframe, recons) assert recons.index.names == ("time", "foo") @@ -792,7 +803,7 @@ def test_to_excel_multiindex_no_write_index(self, path): # Read it back in. reader = ExcelFile(path) - frame3 = pd.read_excel(reader, "test1") + frame3 = pd.read_excel(reader, sheet_name="test1") # Test that it is the same as the initial frame. 
tm.assert_frame_equal(frame1, frame3) @@ -806,7 +817,7 @@ def test_to_excel_float_format(self, path): df.to_excel(path, "test1", float_format="%.2f") reader = ExcelFile(path) - result = pd.read_excel(reader, "test1", index_col=0) + result = pd.read_excel(reader, sheet_name="test1", index_col=0) expected = DataFrame( [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], @@ -825,7 +836,9 @@ def test_to_excel_output_encoding(self, ext): with tm.ensure_clean("__tmp_to_excel_float_format__." + ext) as filename: df.to_excel(filename, sheet_name="TestSheet", encoding="utf8") - result = pd.read_excel(filename, "TestSheet", encoding="utf8", index_col=0) + result = pd.read_excel( + filename, sheet_name="TestSheet", encoding="utf8", index_col=0 + ) tm.assert_frame_equal(result, df) def test_to_excel_unicode_filename(self, ext, path): @@ -845,7 +858,7 @@ def test_to_excel_unicode_filename(self, ext, path): df.to_excel(filename, "test1", float_format="%.2f") reader = ExcelFile(filename) - result = pd.read_excel(reader, "test1", index_col=0) + result = pd.read_excel(reader, sheet_name="test1", index_col=0) expected = DataFrame( [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], @@ -965,7 +978,7 @@ def roundtrip(data, header=True, parser_hdr=0, index=True): data.to_excel(path, header=header, merge_cells=merge_cells, index=index) xf = ExcelFile(path) - return pd.read_excel(xf, xf.sheet_names[0], header=parser_hdr) + return pd.read_excel(xf, sheet_name=xf.sheet_names[0], header=parser_hdr) # Basic test. parser_header = 0 if use_headers else None @@ -1017,18 +1030,20 @@ def test_duplicated_columns(self, path): ) # By default, we mangle. - result = pd.read_excel(path, "test1", index_col=0) + result = pd.read_excel(path, sheet_name="test1", index_col=0) tm.assert_frame_equal(result, expected) # Explicitly, we pass in the parameter. 
- result = pd.read_excel(path, "test1", index_col=0, mangle_dupe_cols=True) + result = pd.read_excel( + path, sheet_name="test1", index_col=0, mangle_dupe_cols=True + ) tm.assert_frame_equal(result, expected) # see gh-11007, gh-10970 df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A", "B"]) df.to_excel(path, "test1") - result = pd.read_excel(path, "test1", index_col=0) + result = pd.read_excel(path, sheet_name="test1", index_col=0) expected = DataFrame( [[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A.1", "B.1"] ) @@ -1036,21 +1051,21 @@ def test_duplicated_columns(self, path): # see gh-10982 df.to_excel(path, "test1", index=False, header=False) - result = pd.read_excel(path, "test1", header=None) + result = pd.read_excel(path, sheet_name="test1", header=None) expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) tm.assert_frame_equal(result, expected) msg = "Setting mangle_dupe_cols=False is not supported yet" with pytest.raises(ValueError, match=msg): - pd.read_excel(path, "test1", header=None, mangle_dupe_cols=False) + pd.read_excel(path, sheet_name="test1", header=None, mangle_dupe_cols=False) def test_swapped_columns(self, path): # Test for issue #5427. write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]}) write_frame.to_excel(path, "test1", columns=["B", "A"]) - read_frame = pd.read_excel(path, "test1", header=0) + read_frame = pd.read_excel(path, sheet_name="test1", header=0) tm.assert_series_equal(write_frame["A"], read_frame["A"]) tm.assert_series_equal(write_frame["B"], read_frame["B"]) @@ -1083,7 +1098,7 @@ def test_write_subset_columns(self, path, to_excel_index, read_excel_index_col): expected = write_frame[["A", "B"]] read_frame = pd.read_excel( - path, "col_subset_bug", index_col=read_excel_index_col + path, sheet_name="col_subset_bug", index_col=read_excel_index_col ) tm.assert_frame_equal(expected, read_frame) @@ -1098,13 +1113,13 @@ def test_comment_arg(self, path): df.to_excel(path, "test_c") # Read file without comment arg. 
- result1 = pd.read_excel(path, "test_c", index_col=0) + result1 = pd.read_excel(path, sheet_name="test_c", index_col=0) result1.iloc[1, 0] = None result1.iloc[1, 1] = None result1.iloc[2, 1] = None - result2 = pd.read_excel(path, "test_c", comment="#", index_col=0) + result2 = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0) tm.assert_frame_equal(result1, result2) def test_comment_default(self, path): @@ -1116,8 +1131,8 @@ def test_comment_default(self, path): df.to_excel(path, "test_c") # Read file with default and explicit comment=None - result1 = pd.read_excel(path, "test_c") - result2 = pd.read_excel(path, "test_c", comment=None) + result1 = pd.read_excel(path, sheet_name="test_c") + result2 = pd.read_excel(path, sheet_name="test_c", comment=None) tm.assert_frame_equal(result1, result2) def test_comment_used(self, path): @@ -1131,7 +1146,7 @@ def test_comment_used(self, path): # Test read_frame_comment against manually produced expected output. expected = DataFrame({"A": ["one", None, "one"], "B": ["two", None, None]}) - result = pd.read_excel(path, "test_c", comment="#", index_col=0) + result = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0) tm.assert_frame_equal(result, expected) def test_comment_empty_line(self, path): @@ -1165,7 +1180,7 @@ def test_datetimes(self, path): write_frame = DataFrame({"A": datetimes}) write_frame.to_excel(path, "Sheet1") - read_frame = pd.read_excel(path, "Sheet1", header=0) + read_frame = pd.read_excel(path, sheet_name="Sheet1", header=0) tm.assert_series_equal(write_frame["A"], read_frame["A"]) @@ -1193,7 +1208,7 @@ def test_write_lists_dict(self, path): } ) df.to_excel(path, "Sheet1") - read = pd.read_excel(path, "Sheet1", header=0, index_col=0) + read = pd.read_excel(path, sheet_name="Sheet1", header=0, index_col=0) expected = df.copy() expected.mixed = expected.mixed.apply(str) diff --git a/pandas/tests/io/excel/test_xlrd.py b/pandas/tests/io/excel/test_xlrd.py index 
d456afe4ed351..1c9c514b20f46 100644 --- a/pandas/tests/io/excel/test_xlrd.py +++ b/pandas/tests/io/excel/test_xlrd.py @@ -28,7 +28,7 @@ def test_read_xlrd_book(read_ext, frame): book = xlrd.open_workbook(pth) with ExcelFile(book, engine=engine) as xl: - result = pd.read_excel(xl, sheet_name, index_col=0) + result = pd.read_excel(xl, sheet_name=sheet_name, index_col=0) tm.assert_frame_equal(df, result) result = pd.read_excel(book, sheet_name=sheet_name, engine=engine, index_col=0) @@ -40,4 +40,4 @@ def test_excel_table_sheet_by_index(datapath, read_ext): path = datapath("io", "data", "excel", f"test1{read_ext}") with pd.ExcelFile(path) as excel: with pytest.raises(xlrd.XLRDError): - pd.read_excel(excel, "asdf") + pd.read_excel(excel, sheet_name="asdf")
Follow-up to #27573. Allows two non-keyword arguments, ``io`` and ``sheet_name``. I think ``sheet_name`` is quite often (e.g. Interactively) supplied without being a keyword argument and requiring it will just be needlessly annoying. Also some clean-up in pandas/tests/io/excel.
https://api.github.com/repos/pandas-dev/pandas/pulls/34418
2020-05-27T23:15:39Z
2020-05-29T01:00:45Z
2020-05-29T01:00:44Z
2020-05-29T06:12:00Z
TST closes #28980 Certain comparison operations misbehaving for period dtype
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index 9fc6568a019b6..d206622521816 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -1522,3 +1522,11 @@ def test_pi_sub_period_nat(self): exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") tm.assert_index_equal(idx - pd.Period("NaT", freq="M"), exp) tm.assert_index_equal(pd.Period("NaT", freq="M") - idx, exp) + + @pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None]) + def test_comparison_operations(self, scalars): + # GH 28980 + expected = Series([False, False]) + s = Series([pd.Period("2019"), pd.Period("2020")], dtype="period[A-DEC]") + result = s == scalars + tm.assert_series_equal(result, expected)
I've added test to compare `pd.Series([...], dtype="period[A-DEC]")` to dtype `str`, `bool`, `int`, `float` and `None`. - [ x ] closes #28980 - [ 5 ] tests added / passed - [ x ] passes `black pandas` - [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/34417
2020-05-27T20:54:29Z
2020-05-28T17:17:44Z
2020-05-28T17:17:44Z
2020-06-11T17:30:42Z
Macpython 32 bit build fixup
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in index c63f368dfae43..326ae36c6a12c 100644 --- a/pandas/_libs/hashtable_func_helper.pxi.in +++ b/pandas/_libs/hashtable_func_helper.pxi.in @@ -84,7 +84,8 @@ cpdef value_count_{{dtype}}({{c_type}}[:] values, bint dropna): int64_t[:] result_counts {{endif}} - Py_ssize_t k + # Don't use Py_ssize_t, since table.n_buckets is unsigned + khiter_t k table = kh_init_{{ttype}}() {{if dtype == 'object'}} @@ -132,7 +133,8 @@ def duplicated_{{dtype}}(const {{c_type}}[:] values, object keep='first'): {{if dtype != 'object'}} {{dtype}}_t value {{endif}} - Py_ssize_t k, i, n = len(values) + Py_ssize_t i, n = len(values) + khiter_t k kh_{{ttype}}_t *table = kh_init_{{ttype}}() ndarray[uint8_t, ndim=1, cast=True] out = np.empty(n, dtype='bool') @@ -222,7 +224,8 @@ def ismember_{{dtype}}(const {{c_type}}[:] arr, {{c_type}}[:] values): boolean ndarry len of (arr) """ cdef: - Py_ssize_t i, n, k + Py_ssize_t i, n + khiter_t k int ret = 0 ndarray[uint8_t] result {{c_type}} val @@ -295,7 +298,8 @@ def mode_{{dtype}}({{ctype}}[:] values, bint dropna): cdef: int count, max_count = 1 int j = -1 # so you can do += - Py_ssize_t k + # Don't use Py_ssize_t, since table.n_buckets is unsigned + khiter_t k kh_{{table_type}}_t *table ndarray[{{ctype}}] modes diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c index 7ba1a6cd398c9..a195c0daf5271 100644 --- a/pandas/_libs/src/parser/tokenizer.c +++ b/pandas/_libs/src/parser/tokenizer.c @@ -709,7 +709,7 @@ int skip_this_line(parser_t *self, int64_t rownum) { } int tokenize_bytes(parser_t *self, - size_t line_limit, int64_t start_lines) { + size_t line_limit, uint64_t start_lines) { int64_t i; uint64_t slen; int should_skip; @@ -1348,7 +1348,7 @@ int parser_trim_buffers(parser_t *self) { int _tokenize_helper(parser_t *self, size_t nrows, int all) { int status = 0; - int64_t start_lines = self->lines; + uint64_t start_lines 
= self->lines; if (self->state == FINISHED) { return 0; diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 9757c4d36d5fa..aadf2c41f7941 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -76,7 +76,7 @@ from pandas._libs.tslibs.tzconversion cimport tz_convert_utc_to_tzlocal cdef: enum: - INT32_MIN = -2_147_483_648 + INT32_MIN = -2_147_483_648LL ctypedef struct asfreq_info: @@ -108,9 +108,11 @@ cdef extern from *: #define FR_UND -10000 /* Undefined */ // must use npy typedef b/c int64_t is aliased in cython-generated c + // unclear why we need LL for that row. + // see https://github.com/pandas-dev/pandas/pull/34416/ static npy_int64 daytime_conversion_factor_matrix[7][7] = { {1, 24, 1440, 86400, 86400000, 86400000000, 86400000000000}, - {0, 1, 60, 3600, 3600000, 3600000000, 3600000000000}, + {0LL, 1LL, 60LL, 3600LL, 3600000LL, 3600000000LL, 3600000000000LL}, {0, 0, 1, 60, 60000, 60000000, 60000000000}, {0, 0, 0, 1, 1000, 1000000, 1000000000}, {0, 0, 0, 0, 1, 1000, 1000000},
Closes #34114 cc @jbrockmendel and @WillAyd. I have no idea if this is correct, but it builds.
https://api.github.com/repos/pandas-dev/pandas/pulls/34416
2020-05-27T20:00:26Z
2020-05-29T19:05:41Z
2020-05-29T19:05:40Z
2020-05-29T19:05:45Z
BUG: merge between partial index and index fails when result is empty
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 9d1b3eaebdf8b..968db9de97093 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -293,6 +293,7 @@ Groupby/resample/rolling Reshaping ^^^^^^^^^ +- Bug in :func:`merge` raising error when performing an inner join with partial index and ``right_index`` when no overlap between indices (:issue:`33814`) - Bug in :meth:`DataFrame.unstack` with missing levels led to incorrect index names (:issue:`37510`) - Bug in :func:`concat` incorrectly casting to ``object`` dtype in some cases when one or more of the operands is empty (:issue:`38843`) - diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 1354e72cadc5a..ac5fc7cddf82a 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -864,9 +864,9 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer): mask_left = left_indexer == -1 mask_right = right_indexer == -1 if mask_left.all(): - key_col = rvals + key_col = Index(rvals) elif right_indexer is not None and mask_right.all(): - key_col = lvals + key_col = Index(lvals) else: key_col = Index(lvals).where(~mask_left, rvals) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index d430856776269..da3ac81c4aa17 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -2375,3 +2375,15 @@ def test_merge_right_left_index(): } ) tm.assert_frame_equal(result, expected) + + +def test_merge_result_empty_index_and_on(): + # GH#33814 + df1 = DataFrame({"a": [1], "b": [2]}).set_index(["a", "b"]) + df2 = DataFrame({"b": [1]}).set_index(["b"]) + expected = DataFrame({"a": [], "b": []}, dtype=np.int64).set_index(["a", "b"]) + result = merge(df1, df2, left_on=["b"], right_index=True) + tm.assert_frame_equal(result, expected) + + result = merge(df2, df1, left_index=True, right_on=["b"]) + tm.assert_frame_equal(result, expected)
- [x] closes #33814 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry The Issue was caused when setting the name of an numpy array.
https://api.github.com/repos/pandas-dev/pandas/pulls/34414
2020-05-27T18:35:11Z
2021-01-03T23:21:42Z
2021-01-03T23:21:41Z
2021-01-03T23:23:12Z
Initialize variables in pqyear, pquarter
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index cc6c4d06ae562..fa8c0414aa8d0 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1281,14 +1281,16 @@ cdef int pyear(int64_t ordinal, int freq): @cython.cdivision cdef int pqyear(int64_t ordinal, int freq): cdef: - int year, quarter + int year = 0 + int quarter = 0 get_yq(ordinal, freq, &quarter, &year) return year cdef int pquarter(int64_t ordinal, int freq): cdef: - int year, quarter + int year = 0 + int quarter = 0 get_yq(ordinal, freq, &quarter, &year) return quarter
This resolves the `-Wmaybe-uninitialized` warning observed when building with the manylinux1 docker image ``` gcc -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -fPIC -DNPY_NO_DEPRECATED_API=0 -Ipandas/_libs/tslibs -I./pandas/_libs/tslibs -I/opt/python/cp38-cp38/lib/python3.8/site-packages/numpy/core/include -I/opt/python/cp38-cp38/include/python3.8 -c pandas/_libs/tslibs/period.c -o build/temp.linux-x86_64-3.8/pandas/_libs/tslibs/period.o pandas/_libs/tslibs/period.c: In function ‘__pyx_f_6pandas_5_libs_6tslibs_6period_pqyear’: pandas/_libs/tslibs/period.c:12464:3: warning: ‘__pyx_v_year’ may be used uninitialized in this function [-Wmaybe-uninitialized] return __pyx_r; ^ pandas/_libs/tslibs/period.c: In function ‘__pyx_f_6pandas_5_libs_6tslibs_6period_pquarter’: pandas/_libs/tslibs/period.c:12512:3: warning: ‘__pyx_v_quarter’ may be used uninitialized in this function [-Wmaybe-uninitialized] return __pyx_r; ``` Hopefully this doesn't break anything. IIUC, these pointers are just used to set the result, so it shouldn't matter what the value is going in? Closes https://github.com/pandas-dev/pandas/issues/34114
https://api.github.com/repos/pandas-dev/pandas/pulls/34409
2020-05-27T15:38:05Z
2020-05-27T16:25:05Z
2020-05-27T16:25:05Z
2020-05-27T16:30:08Z
DOC: add link to benchmarks page in developer docs
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index d02896f777348..457aabcff0c17 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -1275,8 +1275,8 @@ Performance matters and it is worth considering whether your code has introduced performance regressions. pandas is in the process of migrating to `asv benchmarks <https://github.com/spacetelescope/asv>`__ to enable easy monitoring of the performance of critical pandas operations. -These benchmarks are all found in the ``pandas/asv_bench`` directory. asv -supports both python2 and python3. +These benchmarks are all found in the ``pandas/asv_bench`` directory, and the +test results can be found `here <https://pandas.pydata.org/speed/pandas/#/>`__. To use all features of asv, you will need either ``conda`` or ``virtualenv``. For more details please check the `asv installation
- [x] closes #34405 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew @jreback
https://api.github.com/repos/pandas-dev/pandas/pulls/34408
2020-05-27T15:33:41Z
2020-05-27T23:12:06Z
2020-05-27T23:12:06Z
2020-05-27T23:12:11Z
REGR: revert "CLN: _consolidate_inplace less" / fix regression in fillna()
diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst index a8bbf692a72e5..29b0e99a3a356 100644 --- a/doc/source/whatsnew/v1.1.5.rst +++ b/doc/source/whatsnew/v1.1.5.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in metadata propagation for ``groupby`` iterator (:issue:`37343`) - Fixed regression in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`) - Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`) +- Fixed regression in :meth:`DataFrame.fillna` not filling ``NaN`` after other operations such as :meth:`DataFrame.pivot` (:issue:`36495`). .. --------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e866314f00639..d6a2480cf688d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3709,6 +3709,8 @@ class animal locomotion else: index = self.index + self._consolidate_inplace() + if isinstance(index, MultiIndex): try: loc, new_index = index._get_loc_level( @@ -6327,6 +6329,8 @@ def fillna( inplace = validate_bool_kwarg(inplace, "inplace") value, method = validate_fillna_kwargs(value, method) + self._consolidate_inplace() + # set the default here, so functions examining the signaure # can detect if something was set (e.g. 
in groupby) (GH9221) if axis is None: @@ -6749,6 +6753,8 @@ def replace( if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") + self._consolidate_inplace() + if value is None: # passing a single value that is scalar like # when value is None (GH5319), for compat diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 760765e3a20e6..b13368ca5af18 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -442,6 +442,7 @@ def apply( def quantile( self, axis: int = 0, + consolidate: bool = True, transposed: bool = False, interpolation="linear", qs=None, @@ -455,6 +456,8 @@ def quantile( Parameters ---------- axis: reduction axis, default 0 + consolidate: bool, default True. Join together blocks having same + dtype transposed: bool, default False we are holding transposed data interpolation : type of interpolation, default 'linear' @@ -469,6 +472,9 @@ def quantile( # simplify some of the code here and in the blocks assert self.ndim >= 2 + if consolidate: + self._consolidate_inplace() + def get_axe(block, qs, axes): # Because Series dispatches to DataFrame, we will always have # block.ndim == 2 diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py index bbb57da39705b..d59b70fa91a57 100644 --- a/pandas/tests/frame/methods/test_fillna.py +++ b/pandas/tests/frame/methods/test_fillna.py @@ -524,3 +524,18 @@ def test_fill_corner(self, float_frame, float_string_frame): # TODO(wesm): unused? result = empty_float.fillna(value=0) # noqa + + +def test_fillna_nonconsolidated_frame(): + # https://github.com/pandas-dev/pandas/issues/36495 + df = DataFrame( + [ + [1, 1, 1, 1.0], + [2, 2, 2, 2.0], + [3, 3, 3, 3.0], + ], + columns=["i1", "i2", "i3", "f1"], + ) + df_nonconsol = df.pivot("i1", "i2") + result = df_nonconsol.fillna(0) + assert result.isna().sum().sum() == 0
Reverts pandas-dev/pandas#34389 Closes #36495
https://api.github.com/repos/pandas-dev/pandas/pulls/34407
2020-05-27T14:09:47Z
2020-11-26T17:00:20Z
2020-11-26T17:00:20Z
2020-11-28T19:55:32Z
BUG/API: other object type check in Series/DataFrame.equals
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 814dbe999d5c1..43b117a2bc35f 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1162,6 +1162,8 @@ Other - Bug in :meth:`DataFrame.__dir__` caused a segfault when using unicode surrogates in a column name (:issue:`25509`) - Bug in :meth:`DataFrame.plot.scatter` caused an error when plotting variable marker sizes (:issue:`32904`) - :class:`IntegerArray` now implements the ``sum`` operation (:issue:`33172`) +- Bug in :meth:`DataFrame.equals` and :meth:`Series.equals` in allowing subclasses + to be equal (:issue:`34402`). - Bug in :class:`Tick` comparisons raising ``TypeError`` when comparing against timedelta-like objects (:issue:`34088`) - Bug in :class:`Tick` multiplication raising ``TypeError`` when multiplying by a float (:issue:`34486`) - Passing a `set` as `names` argument to :func:`pandas.read_csv`, :func:`pandas.read_table`, or :func:`pandas.read_fwf` will raise ``ValueError: Names should be an ordered collection.`` (:issue:`34946`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index eb55369d83593..e46fde1f59f16 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1278,7 +1278,7 @@ def equals(self, other): >>> df.equals(different_data_type) False """ - if not isinstance(other, self._constructor): + if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False return self._mgr.equals(other._mgr) diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index 08920cf7fceeb..2b462d5a10c51 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -696,3 +696,11 @@ def test_idxmax_preserves_subclass(self): df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) result = df.idxmax() assert isinstance(result, tm.SubclassedSeries) + + def test_equals_subclass(self): + # 
https://github.com/pandas-dev/pandas/pull/34402 + # allow subclass in both directions + df1 = pd.DataFrame({"a": [1, 2, 3]}) + df2 = tm.SubclassedDataFrame({"a": [1, 2, 3]}) + assert df1.equals(df2) + assert df2.equals(df1) diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py index a596ed49c1df2..86330b7cc6993 100644 --- a/pandas/tests/series/test_subclass.py +++ b/pandas/tests/series/test_subclass.py @@ -51,3 +51,11 @@ def test_explode(self): s = tm.SubclassedSeries([[1, 2, 3], "foo", [], [3, 4]]) result = s.explode() assert isinstance(result, tm.SubclassedSeries) + + def test_equals(self): + # https://github.com/pandas-dev/pandas/pull/34402 + # allow subclass in both directions + s1 = pd.Series([1, 2, 3]) + s2 = tm.SubclassedSeries([1, 2, 3]) + assert s1.equals(s2) + assert s2.equals(s1)
xref https://github.com/geopandas/geopandas/issues/1420 First, this PR is fixing the "bug" that we shouldn't rely on `_constructor` being a class that can be used in `isinstance` (see https://github.com/pandas-dev/pandas/issues/32638 for the general discussion about this, this is the only place in our code where `_constructor` is used like this, AFAIK). And even if `_constructor` would be a class, it wouldn't necessarily be the correct class to check with (or not more correct than `type(self)`) But, so this also brings up the API question: what are the "requirements" we put on the type of `other` ? Should it be the same type? (as then could also change it to `if not type(self) is type(other): ...`) Or is a subclass sufficient (with `isinstance`) ? The problem with an isinstance checks with subclasses is that then the order matters (eg `subdf.equals(df)` would not necessarily give the same answer as `df.equals(subdf)`)
https://api.github.com/repos/pandas-dev/pandas/pulls/34402
2020-05-27T07:56:20Z
2020-07-15T22:25:23Z
2020-07-15T22:25:23Z
2020-07-15T22:25:27Z
CI: Remove unused import in offsets.py
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index c5de4533955ff..fd304dc2425a3 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1,5 +1,4 @@ from datetime import datetime, timedelta -import operator import numpy as np
https://api.github.com/repos/pandas-dev/pandas/pulls/34401
2020-05-27T03:00:13Z
2020-05-27T03:28:58Z
2020-05-27T03:28:58Z
2020-05-27T03:29:18Z
CLN: de-duplicate paths in tslibs
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index bf895f155fc59..3a1af9fdb1e8f 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -376,7 +376,7 @@ cpdef bint _does_string_look_like_datetime(str py_string): return True -cdef inline object _parse_dateabbr_string(object date_string, object default, +cdef inline object _parse_dateabbr_string(object date_string, datetime default, object freq): cdef: object ret diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index cc6c4d06ae562..b5488e3d6d5a3 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -955,35 +955,6 @@ cdef inline int month_to_quarter(int month) nogil: # ---------------------------------------------------------------------- # Period logic -@cython.wraparound(False) -@cython.boundscheck(False) -def dt64arr_to_periodarr(const int64_t[:] dtarr, int freq, tz=None): - """ - Convert array of datetime64 values (passed in as 'i8' dtype) to a set of - periods corresponding to desired frequency, per period convention. 
- """ - cdef: - int64_t[:] out - Py_ssize_t i, l - npy_datetimestruct dts - - l = len(dtarr) - - out = np.empty(l, dtype='i8') - - if tz is None: - with nogil: - for i in range(l): - if dtarr[i] == NPY_NAT: - out[i] = NPY_NAT - continue - dt64_to_dtstruct(dtarr[i], &dts) - out[i] = get_period_ordinal(&dts, freq) - else: - out = localize_dt64arr_to_period(dtarr, freq, tz) - return out.base # .base to access underlying np.ndarray - - @cython.wraparound(False) @cython.boundscheck(False) def periodarr_to_dt64arr(const int64_t[:] periodarr, int freq): @@ -1471,8 +1442,7 @@ def extract_freq(ndarray[object] values): @cython.wraparound(False) @cython.boundscheck(False) -cdef int64_t[:] localize_dt64arr_to_period(const int64_t[:] stamps, - int freq, object tz): +def dt64arr_to_periodarr(const int64_t[:] stamps, int freq, object tz): cdef: Py_ssize_t n = len(stamps) int64_t[:] result = np.empty(n, dtype=np.int64) @@ -1521,7 +1491,7 @@ cdef int64_t[:] localize_dt64arr_to_period(const int64_t[:] stamps, dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) result[i] = get_period_ordinal(&dts, freq) - return result + return result.base # .base to get underlying ndarray DIFFERENT_FREQ = ("Input has different freq={other_freq} " diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 3d76483f76600..2133573ee7554 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -28,27 +28,19 @@ cdef: # ---------------------------------------------------------------------- -cpdef resolution(const int64_t[:] stamps, tz=None): +def resolution(const int64_t[:] stamps, tz=None): cdef: Py_ssize_t i, n = len(stamps) npy_datetimestruct dts int reso = RESO_DAY, curr_reso - - if tz is not None: - tz = maybe_get_tz(tz) - return _reso_local(stamps, tz) - - -cdef _reso_local(const int64_t[:] stamps, object tz): - cdef: - Py_ssize_t i, n = len(stamps) - int reso = RESO_DAY, curr_reso ndarray[int64_t] trans int64_t[:] deltas Py_ssize_t[:] 
pos - npy_datetimestruct dts int64_t local_val, delta + if tz is not None: + tz = maybe_get_tz(tz) + if is_utc(tz) or tz is None: for i in range(n): if stamps[i] == NPY_NAT:
https://api.github.com/repos/pandas-dev/pandas/pulls/34400
2020-05-27T02:34:59Z
2020-05-27T17:23:34Z
2020-05-27T17:23:34Z
2020-05-27T17:32:17Z
CLN: GH29547 format with f-strings
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index fbb44408f01be..bb6c6de441558 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -295,13 +295,13 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name): raise TypeError(msg) msg = ( - "Interpreting call\n\t'.{method_name}(a, b)' as " - "\n\t'.{method_name}(index=a, columns=b)'.\nUse named " + f"Interpreting call\n\t'.{method_name}(a, b)' as " + f"\n\t'.{method_name}(index=a, columns=b)'.\nUse named " "arguments to remove any ambiguity. In the future, using " "positional arguments for 'index' or 'columns' will raise " "a 'TypeError'." ) - warnings.warn(msg.format(method_name=method_name), FutureWarning, stacklevel=4) + warnings.warn(msg, FutureWarning, stacklevel=4) out[data._get_axis_name(0)] = args[0] out[data._get_axis_name(1)] = args[1] else: @@ -370,12 +370,15 @@ def validate_percentile(q: Union[float, Iterable[float]]) -> np.ndarray: ------ ValueError if percentiles are not in given interval([0, 1]). """ - msg = "percentiles should all be in the interval [0, 1]. Try {0} instead." q_arr = np.asarray(q) + msg = ( + "percentiles should all be in the interval [0, 1]." + f"Try {q_arr / 100.0} instead." + ) if q_arr.ndim == 0: if not 0 <= q_arr <= 1: - raise ValueError(msg.format(q_arr / 100.0)) + raise ValueError(msg) else: if not all(0 <= qs <= 1 for qs in q_arr): - raise ValueError(msg.format(q_arr / 100.0)) + raise ValueError(msg) return q_arr
Sorry all, new PR. Didn't branch off master for the last one. #29547
https://api.github.com/repos/pandas-dev/pandas/pulls/34399
2020-05-27T02:20:12Z
2020-05-28T17:15:01Z
2020-05-28T17:15:00Z
2020-05-28T19:17:24Z
REF: move remaining offsets into liboffsets
diff --git a/doc/source/reference/offset_frequency.rst b/doc/source/reference/offset_frequency.rst index bb0dae3d5dece..fed23e48e43e9 100644 --- a/doc/source/reference/offset_frequency.rst +++ b/doc/source/reference/offset_frequency.rst @@ -134,6 +134,7 @@ Methods .. autosummary:: :toctree: api/ + CustomBusinessDay.apply_index CustomBusinessDay.apply CustomBusinessDay.copy CustomBusinessDay.isAnchored diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index f8b7e527cac01..660cd3af1b35e 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1008,7 +1008,7 @@ def delta_to_tick(delta: timedelta) -> Tick: # -------------------------------------------------------------------- -class RelativeDeltaOffset(BaseOffset): +cdef class RelativeDeltaOffset(BaseOffset): """ DateOffset subclass backed by a dateutil relativedelta object. """ @@ -1123,6 +1123,120 @@ class RelativeDeltaOffset(BaseOffset): return True +class OffsetMeta(type): + """ + Metaclass that allows us to pretend that all BaseOffset subclasses + inherit from DateOffset (which is needed for backward-compatibility). + """ + + @classmethod + def __instancecheck__(cls, obj) -> bool: + return isinstance(obj, BaseOffset) + + @classmethod + def __subclasscheck__(cls, obj) -> bool: + return issubclass(obj, BaseOffset) + + +# TODO: figure out a way to use a metaclass with a cdef class +class DateOffset(RelativeDeltaOffset, metaclass=OffsetMeta): + """ + Standard kind of date increment used for a date range. + + Works exactly like relativedelta in terms of the keyword args you + pass in, use of the keyword n is discouraged-- you would be better + off specifying n in the keywords you use, but regardless it is + there for you. n is needed for DateOffset subclasses. + + DateOffset work as follows. Each offset specify a set of dates + that conform to the DateOffset. For example, Bday defines this + set to be the set of dates that are weekdays (M-F). 
To test if a + date is in the set of a DateOffset dateOffset we can use the + is_on_offset method: dateOffset.is_on_offset(date). + + If a date is not on a valid date, the rollback and rollforward + methods can be used to roll the date to the nearest valid date + before/after the date. + + DateOffsets can be created to move dates forward a given number of + valid dates. For example, Bday(2) can be added to a date to move + it two business days forward. If the date does not start on a + valid date, first it is moved to a valid date. Thus pseudo code + is: + + def __add__(date): + date = rollback(date) # does nothing if date is valid + return date + <n number of periods> + + When a date offset is created for a negative number of periods, + the date is first rolled forward. The pseudo code is: + + def __add__(date): + date = rollforward(date) # does nothing is date is valid + return date + <n number of periods> + + Zero presents a problem. Should it roll forward or back? We + arbitrarily have it rollforward: + + date + BDay(0) == BDay.rollforward(date) + + Since 0 is a bit weird, we suggest avoiding its use. + + Parameters + ---------- + n : int, default 1 + The number of time periods the offset represents. + normalize : bool, default False + Whether to round the result of a DateOffset addition down to the + previous midnight. + **kwds + Temporal parameter that add to or replace the offset value. + + Parameters that **add** to the offset (like Timedelta): + + - years + - months + - weeks + - days + - hours + - minutes + - seconds + - microseconds + - nanoseconds + + Parameters that **replace** the offset value: + + - year + - month + - day + - weekday + - hour + - minute + - second + - microsecond + - nanosecond. + + See Also + -------- + dateutil.relativedelta.relativedelta : The relativedelta type is designed + to be applied to an existing datetime an can replace specific components of + that datetime, or represents an interval of time. 
+ + Examples + -------- + >>> from pandas.tseries.offsets import DateOffset + >>> ts = pd.Timestamp('2017-01-01 09:10:11') + >>> ts + DateOffset(months=3) + Timestamp('2017-04-01 09:10:11') + + >>> ts = pd.Timestamp('2017-01-01 09:10:11') + >>> ts + DateOffset(months=2) + Timestamp('2017-03-01 09:10:11') + """ + + pass + + # -------------------------------------------------------------------- @@ -3134,6 +3248,292 @@ cdef class Easter(SingleConstructorOffset): return date(dt.year, dt.month, dt.day) == easter(dt.year) +# ---------------------------------------------------------------------- +# Custom Offset classes + + +class CustomBusinessDay(CustomMixin, BusinessDay): + """ + DateOffset subclass representing custom business days excluding holidays. + + Parameters + ---------- + n : int, default 1 + normalize : bool, default False + Normalize start/end dates to midnight before generating date range. + weekmask : str, Default 'Mon Tue Wed Thu Fri' + Weekmask of valid business days, passed to ``numpy.busdaycalendar``. + holidays : list + List/array of dates to exclude from the set of valid business days, + passed to ``numpy.busdaycalendar``. 
+ calendar : pd.HolidayCalendar or np.busdaycalendar + offset : timedelta, default timedelta(0) + """ + + _prefix = "C" + _attributes = frozenset( + ["n", "normalize", "weekmask", "holidays", "calendar", "offset"] + ) + + def __reduce__(self): + # np.holidaycalendar cant be pickled, so pass None there and + # it will be re-constructed within __init__ + tup = (self.n, self.normalize, self.weekmask, self.holidays, None, self.offset) + return type(self), tup + + def __init__( + self, + n=1, + normalize=False, + weekmask="Mon Tue Wed Thu Fri", + holidays=None, + calendar=None, + offset=timedelta(0), + ): + BusinessDay.__init__(self, n, normalize, offset) + CustomMixin.__init__(self, weekmask, holidays, calendar) + + def __setstate__(self, state): + self.holidays = state.pop("holidays") + self.weekmask = state.pop("weekmask") + super().__setstate__(state) + + @apply_wraps + def apply(self, other): + if self.n <= 0: + roll = "forward" + else: + roll = "backward" + + if isinstance(other, datetime): + date_in = other + np_dt = np.datetime64(date_in.date()) + + np_incr_dt = np.busday_offset( + np_dt, self.n, roll=roll, busdaycal=self.calendar + ) + + dt_date = np_incr_dt.astype(datetime) + result = datetime.combine(dt_date, date_in.time()) + + if self.offset: + result = result + self.offset + return result + + elif isinstance(other, (timedelta, Tick)): + return BDay(self.n, offset=self.offset + other, normalize=self.normalize) + else: + raise ApplyTypeError( + "Only know how to combine trading day with " + "datetime, datetime64 or timedelta." + ) + + def apply_index(self, i): + raise NotImplementedError + + def is_on_offset(self, dt: datetime) -> bool: + if self.normalize and not is_normalized(dt): + return False + day64 = to_dt64D(dt) + return np.is_busday(day64, busdaycal=self.calendar) + + +class CustomBusinessHour(CustomMixin, BusinessHour): + """ + DateOffset subclass representing possibly n custom business days. 
+ """ + + _prefix = "CBH" + _anchor = 0 + _attributes = frozenset( + ["n", "normalize", "weekmask", "holidays", "calendar", "start", "end", "offset"] + ) + + def __init__( + self, + n=1, + normalize=False, + weekmask="Mon Tue Wed Thu Fri", + holidays=None, + calendar=None, + start="09:00", + end="17:00", + offset=timedelta(0), + ): + BusinessHour.__init__(self, n, normalize, start=start, end=end, offset=offset) + CustomMixin.__init__(self, weekmask, holidays, calendar) + + def __reduce__(self): + # None for self.calendar bc np.busdaycalendar doesnt pickle nicely + return ( + type(self), + ( + self.n, + self.normalize, + self.weekmask, + self.holidays, + None, + self.start, + self.end, + self.offset, + ), + ) + + +class _CustomBusinessMonth(CustomMixin, BusinessMixin, MonthOffset): + """ + DateOffset subclass representing custom business month(s). + + Increments between beginning/end of month dates. + + Parameters + ---------- + n : int, default 1 + The number of months represented. + normalize : bool, default False + Normalize start/end dates to midnight before generating date range. + weekmask : str, Default 'Mon Tue Wed Thu Fri' + Weekmask of valid business days, passed to ``numpy.busdaycalendar``. + holidays : list + List/array of dates to exclude from the set of valid business days, + passed to ``numpy.busdaycalendar``. + calendar : pd.HolidayCalendar or np.busdaycalendar + Calendar to integrate. + offset : timedelta, default timedelta(0) + Time offset to apply. 
+ """ + + _attributes = frozenset( + ["n", "normalize", "weekmask", "holidays", "calendar", "offset"] + ) + + is_on_offset = BaseOffset.is_on_offset # override MonthOffset method + apply_index = BaseOffset.apply_index # override MonthOffset method + + def __init__( + self, + n=1, + normalize=False, + weekmask="Mon Tue Wed Thu Fri", + holidays=None, + calendar=None, + offset=timedelta(0), + ): + BusinessMixin.__init__(self, n, normalize, offset) + CustomMixin.__init__(self, weekmask, holidays, calendar) + + def __reduce__(self): + # None for self.calendar bc np.busdaycalendar doesnt pickle nicely + return ( + type(self), + (self.n, self.normalize, self.weekmask, self.holidays, None, self.offset), + ) + + @cache_readonly + def cbday_roll(self): + """ + Define default roll function to be called in apply method. + """ + cbday = CustomBusinessDay(n=self.n, normalize=False, **self.kwds) + + if self._prefix.endswith("S"): + # MonthBegin + roll_func = cbday.rollforward + else: + # MonthEnd + roll_func = cbday.rollback + return roll_func + + @cache_readonly + def m_offset(self): + if self._prefix.endswith("S"): + # MonthBegin + moff = MonthBegin(n=1, normalize=False) + else: + # MonthEnd + moff = MonthEnd(n=1, normalize=False) + return moff + + @cache_readonly + def month_roll(self): + """ + Define default roll function to be called in apply method. 
+ """ + if self._prefix.endswith("S"): + # MonthBegin + roll_func = self.m_offset.rollback + else: + # MonthEnd + roll_func = self.m_offset.rollforward + return roll_func + + @apply_wraps + def apply(self, other): + # First move to month offset + cur_month_offset_date = self.month_roll(other) + + # Find this custom month offset + compare_date = self.cbday_roll(cur_month_offset_date) + n = roll_convention(other.day, self.n, compare_date.day) + + new = cur_month_offset_date + n * self.m_offset + result = self.cbday_roll(new) + return result + + +class CustomBusinessMonthEnd(_CustomBusinessMonth): + _prefix = "CBM" + + +class CustomBusinessMonthBegin(_CustomBusinessMonth): + _prefix = "CBMS" + + +BDay = BusinessDay +BMonthEnd = BusinessMonthEnd +BMonthBegin = BusinessMonthBegin +CBMonthEnd = CustomBusinessMonthEnd +CBMonthBegin = CustomBusinessMonthBegin +CDay = CustomBusinessDay + +prefix_mapping = { + offset._prefix: offset + for offset in [ + YearBegin, # 'AS' + YearEnd, # 'A' + BYearBegin, # 'BAS' + BYearEnd, # 'BA' + BusinessDay, # 'B' + BusinessMonthBegin, # 'BMS' + BusinessMonthEnd, # 'BM' + BQuarterEnd, # 'BQ' + BQuarterBegin, # 'BQS' + BusinessHour, # 'BH' + CustomBusinessDay, # 'C' + CustomBusinessMonthEnd, # 'CBM' + CustomBusinessMonthBegin, # 'CBMS' + CustomBusinessHour, # 'CBH' + MonthEnd, # 'M' + MonthBegin, # 'MS' + Nano, # 'N' + SemiMonthEnd, # 'SM' + SemiMonthBegin, # 'SMS' + Week, # 'W' + Second, # 'S' + Minute, # 'T' + Micro, # 'U' + QuarterEnd, # 'Q' + QuarterBegin, # 'QS' + Milli, # 'L' + Hour, # 'H' + Day, # 'D' + WeekOfMonth, # 'WOM' + FY5253, + FY5253Quarter, + ] +} + + # ---------------------------------------------------------------------- # RelativeDelta Arithmetic diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9a1e750c5de93..877d19ef68558 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -8,6 +8,7 @@ from pandas._libs import NaT, Period, Timestamp, index as 
libindex, lib, tslib from pandas._libs.tslibs import fields, parsing, resolution as libresolution, timezones from pandas._libs.tslibs.frequencies import get_freq_group +from pandas._libs.tslibs.offsets import prefix_mapping from pandas._typing import DtypeObj, Label from pandas.util._decorators import cache_readonly @@ -30,7 +31,6 @@ from pandas.core.tools.times import to_time from pandas.tseries.frequencies import to_offset -from pandas.tseries.offsets import prefix_mapping def _new_DatetimeIndex(cls, d): diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index e1936fa819baf..9d867df147096 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -10,6 +10,7 @@ from pytz import timezone from pandas._libs.tslibs import timezones +from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping from pandas.errors import OutOfBoundsDatetime import pandas.util._test_decorators as td @@ -18,8 +19,6 @@ import pandas._testing as tm from pandas.core.arrays.datetimes import generate_range -from pandas.tseries.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping - START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 84113afdb0969..084ad4294f9d0 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -10,7 +10,18 @@ from pandas._libs.tslibs.ccalendar import MONTH_ALIASES, MONTH_NUMBERS, int_to_weekday from pandas._libs.tslibs.fields import build_field_sarray import pandas._libs.tslibs.frequencies as libfreqs -from pandas._libs.tslibs.offsets import _offset_to_period_map +from pandas._libs.tslibs.offsets import ( + DateOffset, + Day, + Hour, + Micro, + Milli, + Minute, + Nano, + Second, + _offset_to_period_map, + prefix_mapping, +) from pandas._libs.tslibs.parsing import get_rule_month from 
pandas._libs.tslibs.resolution import Resolution, month_position_check from pandas._libs.tslibs.timezones import UTC @@ -26,18 +37,6 @@ from pandas.core.algorithms import unique -from pandas.tseries.offsets import ( - DateOffset, - Day, - Hour, - Micro, - Milli, - Minute, - Nano, - Second, - prefix_mapping, -) - _ONE_MICRO = 1000 _ONE_MILLI = _ONE_MICRO * 1000 _ONE_SECOND = _ONE_MILLI * 1000 diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index fd304dc2425a3..cee99d23f8d90 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1,22 +1,25 @@ -from datetime import datetime, timedelta - -import numpy as np - -from pandas._libs.tslibs import offsets as liboffsets from pandas._libs.tslibs.offsets import ( # noqa:F401 FY5253, - ApplyTypeError, BaseOffset, + BDay, + BMonthBegin, + BMonthEnd, BQuarterBegin, BQuarterEnd, BusinessDay, BusinessHour, - BusinessMixin, BusinessMonthBegin, BusinessMonthEnd, BYearBegin, BYearEnd, - CustomMixin, + CBMonthBegin, + CBMonthEnd, + CDay, + CustomBusinessDay, + CustomBusinessHour, + CustomBusinessMonthBegin, + CustomBusinessMonthEnd, + DateOffset, Day, Easter, FY5253Quarter, @@ -33,19 +36,12 @@ Second, SemiMonthBegin, SemiMonthEnd, - SingleConstructorOffset, Tick, Week, WeekOfMonth, YearBegin, YearEnd, - apply_index_wraps, - apply_wraps, - is_normalized, - shift_month, - to_dt64D, ) -from pandas.util._decorators import cache_readonly, doc __all__ = [ "Day", @@ -85,411 +81,3 @@ "Nano", "DateOffset", ] - - -# --------------------------------------------------------------------- -# DateOffset - - -class OffsetMeta(type): - """ - Metaclass that allows us to pretend that all BaseOffset subclasses - inherit from DateOffset (which is needed for backward-compatibility). 
- """ - - @classmethod - def __instancecheck__(cls, obj) -> bool: - return isinstance(obj, BaseOffset) - - @classmethod - def __subclasscheck__(cls, obj) -> bool: - return issubclass(obj, BaseOffset) - - -class DateOffset(liboffsets.RelativeDeltaOffset, metaclass=OffsetMeta): - """ - Standard kind of date increment used for a date range. - - Works exactly like relativedelta in terms of the keyword args you - pass in, use of the keyword n is discouraged-- you would be better - off specifying n in the keywords you use, but regardless it is - there for you. n is needed for DateOffset subclasses. - - DateOffset work as follows. Each offset specify a set of dates - that conform to the DateOffset. For example, Bday defines this - set to be the set of dates that are weekdays (M-F). To test if a - date is in the set of a DateOffset dateOffset we can use the - is_on_offset method: dateOffset.is_on_offset(date). - - If a date is not on a valid date, the rollback and rollforward - methods can be used to roll the date to the nearest valid date - before/after the date. - - DateOffsets can be created to move dates forward a given number of - valid dates. For example, Bday(2) can be added to a date to move - it two business days forward. If the date does not start on a - valid date, first it is moved to a valid date. Thus pseudo code - is: - - def __add__(date): - date = rollback(date) # does nothing if date is valid - return date + <n number of periods> - - When a date offset is created for a negative number of periods, - the date is first rolled forward. The pseudo code is: - - def __add__(date): - date = rollforward(date) # does nothing is date is valid - return date + <n number of periods> - - Zero presents a problem. Should it roll forward or back? We - arbitrarily have it rollforward: - - date + BDay(0) == BDay.rollforward(date) - - Since 0 is a bit weird, we suggest avoiding its use. 
- - Parameters - ---------- - n : int, default 1 - The number of time periods the offset represents. - normalize : bool, default False - Whether to round the result of a DateOffset addition down to the - previous midnight. - **kwds - Temporal parameter that add to or replace the offset value. - - Parameters that **add** to the offset (like Timedelta): - - - years - - months - - weeks - - days - - hours - - minutes - - seconds - - microseconds - - nanoseconds - - Parameters that **replace** the offset value: - - - year - - month - - day - - weekday - - hour - - minute - - second - - microsecond - - nanosecond. - - See Also - -------- - dateutil.relativedelta.relativedelta : The relativedelta type is designed - to be applied to an existing datetime an can replace specific components of - that datetime, or represents an interval of time. - - Examples - -------- - >>> from pandas.tseries.offsets import DateOffset - >>> ts = pd.Timestamp('2017-01-01 09:10:11') - >>> ts + DateOffset(months=3) - Timestamp('2017-04-01 09:10:11') - - >>> ts = pd.Timestamp('2017-01-01 09:10:11') - >>> ts + DateOffset(months=2) - Timestamp('2017-03-01 09:10:11') - """ - - pass - - -class CustomBusinessDay(CustomMixin, BusinessDay): - """ - DateOffset subclass representing custom business days excluding holidays. - - Parameters - ---------- - n : int, default 1 - normalize : bool, default False - Normalize start/end dates to midnight before generating date range. - weekmask : str, Default 'Mon Tue Wed Thu Fri' - Weekmask of valid business days, passed to ``numpy.busdaycalendar``. - holidays : list - List/array of dates to exclude from the set of valid business days, - passed to ``numpy.busdaycalendar``. 
- calendar : pd.HolidayCalendar or np.busdaycalendar - offset : timedelta, default timedelta(0) - """ - - _prefix = "C" - _attributes = frozenset( - ["n", "normalize", "weekmask", "holidays", "calendar", "offset"] - ) - - def __reduce__(self): - # np.holidaycalendar cant be pickled, so pass None there and - # it will be re-constructed within __init__ - tup = (self.n, self.normalize, self.weekmask, self.holidays, None, self.offset) - return type(self), tup - - def __init__( - self, - n=1, - normalize=False, - weekmask="Mon Tue Wed Thu Fri", - holidays=None, - calendar=None, - offset=timedelta(0), - ): - BusinessDay.__init__(self, n, normalize, offset) - CustomMixin.__init__(self, weekmask, holidays, calendar) - - def __setstate__(self, state): - self.holidays = state.pop("holidays") - self.weekmask = state.pop("weekmask") - super().__setstate__(state) - - @apply_wraps - def apply(self, other): - if self.n <= 0: - roll = "forward" - else: - roll = "backward" - - if isinstance(other, datetime): - date_in = other - np_dt = np.datetime64(date_in.date()) - - np_incr_dt = np.busday_offset( - np_dt, self.n, roll=roll, busdaycal=self.calendar - ) - - dt_date = np_incr_dt.astype(datetime) - result = datetime.combine(dt_date, date_in.time()) - - if self.offset: - result = result + self.offset - return result - - elif isinstance(other, (timedelta, Tick)): - return BDay(self.n, offset=self.offset + other, normalize=self.normalize) - else: - raise ApplyTypeError( - "Only know how to combine trading day with " - "datetime, datetime64 or timedelta." - ) - - def apply_index(self, i): - raise NotImplementedError - - def is_on_offset(self, dt: datetime) -> bool: - if self.normalize and not is_normalized(dt): - return False - day64 = to_dt64D(dt) - return np.is_busday(day64, busdaycal=self.calendar) - - -class CustomBusinessHour(CustomMixin, BusinessHour): - """ - DateOffset subclass representing possibly n custom business days. 
- """ - - _prefix = "CBH" - _anchor = 0 - _attributes = frozenset( - ["n", "normalize", "weekmask", "holidays", "calendar", "start", "end", "offset"] - ) - - def __init__( - self, - n=1, - normalize=False, - weekmask="Mon Tue Wed Thu Fri", - holidays=None, - calendar=None, - start="09:00", - end="17:00", - offset=timedelta(0), - ): - BusinessHour.__init__(self, n, normalize, start=start, end=end, offset=offset) - CustomMixin.__init__(self, weekmask, holidays, calendar) - - def __reduce__(self): - # None for self.calendar bc np.busdaycalendar doesnt pickle nicely - return ( - type(self), - ( - self.n, - self.normalize, - self.weekmask, - self.holidays, - None, - self.start, - self.end, - self.offset, - ), - ) - - -# --------------------------------------------------------------------- -# Month-Based Offset Classes - - -@doc(bound="bound") -class _CustomBusinessMonth(CustomMixin, BusinessMixin, liboffsets.MonthOffset): - """ - DateOffset subclass representing custom business month(s). - - Increments between {bound} of month dates. - - Parameters - ---------- - n : int, default 1 - The number of months represented. - normalize : bool, default False - Normalize start/end dates to midnight before generating date range. - weekmask : str, Default 'Mon Tue Wed Thu Fri' - Weekmask of valid business days, passed to ``numpy.busdaycalendar``. - holidays : list - List/array of dates to exclude from the set of valid business days, - passed to ``numpy.busdaycalendar``. - calendar : pd.HolidayCalendar or np.busdaycalendar - Calendar to integrate. - offset : timedelta, default timedelta(0) - Time offset to apply. 
- """ - - _attributes = frozenset( - ["n", "normalize", "weekmask", "holidays", "calendar", "offset"] - ) - - is_on_offset = BaseOffset.is_on_offset # override MonthOffset method - apply_index = BaseOffset.apply_index # override MonthOffset method - - def __init__( - self, - n=1, - normalize=False, - weekmask="Mon Tue Wed Thu Fri", - holidays=None, - calendar=None, - offset=timedelta(0), - ): - BusinessMixin.__init__(self, n, normalize, offset) - CustomMixin.__init__(self, weekmask, holidays, calendar) - - def __reduce__(self): - # None for self.calendar bc np.busdaycalendar doesnt pickle nicely - return ( - type(self), - (self.n, self.normalize, self.weekmask, self.holidays, None, self.offset), - ) - - @cache_readonly - def cbday_roll(self): - """ - Define default roll function to be called in apply method. - """ - cbday = CustomBusinessDay(n=self.n, normalize=False, **self.kwds) - - if self._prefix.endswith("S"): - # MonthBegin - roll_func = cbday.rollforward - else: - # MonthEnd - roll_func = cbday.rollback - return roll_func - - @cache_readonly - def m_offset(self): - if self._prefix.endswith("S"): - # MonthBegin - moff = MonthBegin(n=1, normalize=False) - else: - # MonthEnd - moff = MonthEnd(n=1, normalize=False) - return moff - - @cache_readonly - def month_roll(self): - """ - Define default roll function to be called in apply method. 
- """ - if self._prefix.endswith("S"): - # MonthBegin - roll_func = self.m_offset.rollback - else: - # MonthEnd - roll_func = self.m_offset.rollforward - return roll_func - - @apply_wraps - def apply(self, other): - # First move to month offset - cur_month_offset_date = self.month_roll(other) - - # Find this custom month offset - compare_date = self.cbday_roll(cur_month_offset_date) - n = liboffsets.roll_convention(other.day, self.n, compare_date.day) - - new = cur_month_offset_date + n * self.m_offset - result = self.cbday_roll(new) - return result - - -@doc(_CustomBusinessMonth, bound="end") -class CustomBusinessMonthEnd(_CustomBusinessMonth): - _prefix = "CBM" - - -@doc(_CustomBusinessMonth, bound="beginning") -class CustomBusinessMonthBegin(_CustomBusinessMonth): - _prefix = "CBMS" - - -# --------------------------------------------------------------------- - -BDay = BusinessDay -BMonthEnd = BusinessMonthEnd -BMonthBegin = BusinessMonthBegin -CBMonthEnd = CustomBusinessMonthEnd -CBMonthBegin = CustomBusinessMonthBegin -CDay = CustomBusinessDay - -prefix_mapping = { - offset._prefix: offset - for offset in [ - YearBegin, # 'AS' - YearEnd, # 'A' - BYearBegin, # 'BAS' - BYearEnd, # 'BA' - BusinessDay, # 'B' - BusinessMonthBegin, # 'BMS' - BusinessMonthEnd, # 'BM' - BQuarterEnd, # 'BQ' - BQuarterBegin, # 'BQS' - BusinessHour, # 'BH' - CustomBusinessDay, # 'C' - CustomBusinessMonthEnd, # 'CBM' - CustomBusinessMonthBegin, # 'CBMS' - CustomBusinessHour, # 'CBH' - MonthEnd, # 'M' - MonthBegin, # 'MS' - Nano, # 'N' - SemiMonthEnd, # 'SM' - SemiMonthBegin, # 'SMS' - Week, # 'W' - Second, # 'S' - Minute, # 'T' - Micro, # 'U' - QuarterEnd, # 'Q' - QuarterBegin, # 'QS' - Milli, # 'L' - Hour, # 'H' - Day, # 'D' - WeekOfMonth, # 'WOM' - FY5253, - FY5253Quarter, - ] -}
this _doesnt_ make them all cdef classes yet; need to get #34345 figured out before that is doable.
https://api.github.com/repos/pandas-dev/pandas/pulls/34398
2020-05-27T00:51:00Z
2020-05-27T17:23:51Z
2020-05-27T17:23:51Z
2020-05-27T17:32:54Z
CLN: GH29547 Replace old string formatting with f-strings
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index b494dbd8a38fa..eed90b112d115 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -394,33 +394,42 @@ def time_dup_string_tzoffset_dates(self, cache): class DatetimeAccessor: - params = [None, "US/Eastern", "UTC", dateutil.tz.tzutc()] - param_names = "tz" - - def setup(self, tz): + params = ( + [None, "US/Eastern", "UTC", dateutil.tz.tzutc()], + ["%Y-%m-%d %H:%M:%S.%f%z", "%Y-%m-%d %H:%M:%S%z"], + ["T", "S", "NS"], + ) + param_names = ["tz", "fmt", "frequency"] + + def setup(self, tz, fmt, frequency): N = 100000 - self.series = Series(date_range(start="1/1/2000", periods=N, freq="T", tz=tz)) + self.series = Series( + date_range(start="1/1/2000", periods=N, freq=frequency, tz=tz) + ) - def time_dt_accessor(self, tz): + def time_dt_accessor(self, *args): self.series.dt - def time_dt_accessor_normalize(self, tz): + def time_dt_accessor_normalize(self, *args): self.series.dt.normalize() - def time_dt_accessor_month_name(self, tz): + def time_dt_accessor_month_name(self, *args): self.series.dt.month_name() - def time_dt_accessor_day_name(self, tz): + def time_dt_accessor_day_name(self, *args): self.series.dt.day_name() - def time_dt_accessor_time(self, tz): + def time_dt_accessor_time(self, *args): self.series.dt.time - def time_dt_accessor_date(self, tz): + def time_dt_accessor_date(self, *args): self.series.dt.date - def time_dt_accessor_year(self, tz): + def time_dt_accessor_year(self, *args): self.series.dt.year + def time_dt_accessor_strftime(self, _, fmt, *args): + self.series.dt.strftime(fmt) + from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/tslibs/timestamp.py b/asv_bench/benchmarks/tslibs/timestamp.py index 3ef9b814dd79e..cb714d4491312 100644 --- a/asv_bench/benchmarks/tslibs/timestamp.py +++ b/asv_bench/benchmarks/tslibs/timestamp.py @@ -109,6 +109,17 @@ def time_month_name(self, tz, 
freq): self.ts.month_name() +class TimestampMethods: + params = ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S.%f"] + param_names = ["fmt"] + + def setup(self, fmt): + self.ts = Timestamp("2020-05-23 18:06:13.123456789") + + def time_strftime(self, fmt): + self.ts.strftime(fmt) + + class TimestampOps: params = [None, "US/Eastern", pytz.UTC, dateutil.tz.tzutc()] param_names = ["tz"] diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index bfe2dcee40d5e..fdce4a6e2ecaa 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -660,6 +660,7 @@ Datetimelike - Bug in :meth:`DatetimeIndex.intersection` and :meth:`TimedeltaIndex.intersection` with results not having the correct ``name`` attribute (:issue:`33904`) - Bug in :meth:`DatetimeArray.__setitem__`, :meth:`TimedeltaArray.__setitem__`, :meth:`PeriodArray.__setitem__` incorrectly allowing values with ``int64`` dtype to be silently cast (:issue:`33717`) - Bug in subtracting :class:`TimedeltaIndex` from :class:`Period` incorrectly raising ``TypeError`` in some cases where it should succeed and ``IncompatibleFrequency`` in some cases where it should raise ``TypeError`` (:issue:`33883`) +- Bug in :meth:`Timestamp.strftime` did not display full nanosecond precision (:issue:`29461`) Timedelta ^^^^^^^^^ diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 12d44413e1350..100373de69dfd 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -7,6 +7,7 @@ construction requirements, we need to do object instantiation in python shadows the python class, where we do any heavy lifting. 
""" import warnings +import time as _time import numpy as np cimport numpy as cnp @@ -1469,6 +1470,22 @@ default 'raise' np.array([self.value], dtype='i8'), tz=self.tz)[0] return Timestamp(normalized_value).tz_localize(self.tz) + def strftime(self, format: str) -> str: + newformat = format + # only do additional processing if necessary + if self.nanosecond and '%f' in format: + newformat = [] + for ch in format: + if ch == 'f': + # remove accompanying % + newformat.pop() + # and put fractional seconds in its place + newformat.append(f"{self.microsecond * 1000 + self.nanosecond}") + else: + newformat.append(ch) + newformat = "".join(newformat) + return _time.strftime(newformat, self.timetuple()) + # Add the min and max fields at the class level cdef int64_t _NS_UPPER_BOUND = np.iinfo(np.int64).max diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index cee7ac450e411..f803939d898bf 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -15,7 +15,7 @@ from pandas.compat.numpy import np_datetime64_compat import pandas.util._test_decorators as td -from pandas import NaT, Timedelta, Timestamp +from pandas import NaT, Timedelta, Timestamp, to_datetime import pandas._testing as tm from pandas.tseries import offsets @@ -381,6 +381,19 @@ def test_tz_conversion_freq(self, tz_naive_fixture): t2 = Timestamp("2019-01-02 12:00", tz="UTC", freq="T") assert t2.tz_convert(tz="UTC").freq == t2.freq + @pytest.mark.parametrize( + "_input,fmt,_output", + [ + ("2020-05-22 11:07:30", "%Y-%m-%d", "2020-05-22"), + ("2020-05-22 11:07:30.123456", "%Y-%m-%d %f", "2020-05-22 123456"), + ("2020-05-22 11:07:30.123456789", "%f", "123456789"), + ], + ) + def test_strftime(self, _input, fmt, _output): + ts = Timestamp(_input) + result = ts.strftime(fmt) + assert result == _output + class TestTimestampNsOperations: def test_nanosecond_string_parsing(self): @@ -442,6 +455,20 @@ 
def test_nanosecond_timestamp(self): assert t.value == expected assert t.nanosecond == 10 + @pytest.mark.parametrize( + "date", + [ + "2020-05-22 08:53:19.123456789", + "2020-05-22 08:53:19.123456", + "2020-05-22 08:53:19", + ], + ) + @pytest.mark.parametrize("fmt", ["%m/%d/%Y %H:%M:%S.%f", "%m%d%Y%H%M%S%f"]) + def test_nanosecond_roundtrip(self, date, fmt): + ts = Timestamp(date) + string = ts.strftime(fmt) + assert ts == to_datetime(string, format=fmt) + class TestTimestampToJulianDate: def test_compare_1700(self): diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index fbb44408f01be..8de5fd6638c17 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -295,13 +295,13 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name): raise TypeError(msg) msg = ( - "Interpreting call\n\t'.{method_name}(a, b)' as " - "\n\t'.{method_name}(index=a, columns=b)'.\nUse named " - "arguments to remove any ambiguity. In the future, using " - "positional arguments for 'index' or 'columns' will raise " - "a 'TypeError'." + f"Interpreting call\n\t'.{method_name}(a, b)' as " + + f"\n\t'.{method_name}(index=a, columns=b)'.\nUse named " + + f"arguments to remove any ambiguity. In the future, using " + + f"positional arguments for 'index' or 'columns' will raise " + + f"a 'TypeError'." ) - warnings.warn(msg.format(method_name=method_name), FutureWarning, stacklevel=4) + warnings.warn(msg, FutureWarning, stacklevel=4) out[data._get_axis_name(0)] = args[0] out[data._get_axis_name(1)] = args[1] else: @@ -370,12 +370,15 @@ def validate_percentile(q: Union[float, Iterable[float]]) -> np.ndarray: ------ ValueError if percentiles are not in given interval([0, 1]). """ - msg = "percentiles should all be in the interval [0, 1]. Try {0} instead." q_arr = np.asarray(q) + msg = ( + f"percentiles should all be in the interval [0, 1]. " + f"Try {q_arr / 100.0} instead." 
+ ) if q_arr.ndim == 0: if not 0 <= q_arr <= 1: - raise ValueError(msg.format(q_arr / 100.0)) + raise ValueError(msg) else: if not all(0 <= qs <= 1 for qs in q_arr): - raise ValueError(msg.format(q_arr / 100.0)) + raise ValueError(msg) return q_arr
Is it normal for all my old commits on other branches to appear here? Am I doing something wrong?
https://api.github.com/repos/pandas-dev/pandas/pulls/34397
2020-05-26T22:13:31Z
2020-05-27T02:11:41Z
null
2020-05-27T02:11:48Z
DOC: added D-Tale to the visualizations section of Ecosystem page
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 6c6a7f42d4b7e..62065f016e438 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -153,6 +153,23 @@ A good implementation for Python users is `has2k1/plotnine <https://github.com/h Spun off from the main pandas library, the `qtpandas <https://github.com/draperjames/qtpandas>`__ library enables DataFrame visualization and manipulation in PyQt4 and PySide applications. +`D-Tale <https://github.com/man-group/dtale>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +D-Tale is a lightweight web client for visualizing pandas data structures. It +provides a rich spreadsheet-style grid which acts as a wrapper for a lot of +pandas functionality (query, sort, describe, corr...) so users can quickly +manipulate their data. There is also an interactive chart-builder using Plotly +Dash allowing users to build nice portable visualizations. D-Tale can be +invoked with the following command + +.. code:: python + + import dtale; dtale.show(df) + +D-Tale integrates seamlessly with jupyter notebooks, python terminals, kaggle +& Google Colab. Here are some demos of the `grid <http://alphatechadmin.pythonanywhere.com/>`__ +and `chart-builder <http://alphatechadmin.pythonanywhere.com/charts/4?chart_type=surface&query=&x=date&z=Col0&agg=raw&cpg=false&y=%5B%22security_id%22%5D>`__. .. _ecosystem.ide:
- [x] closes #34376 Added [D-Tale](https://github.com/man-group/dtale) to the visualizations section of [Pandas Ecosystem](https://pandas.pydata.org/docs/ecosystem.html) doc
https://api.github.com/repos/pandas-dev/pandas/pulls/34396
2020-05-26T22:02:51Z
2020-05-27T18:28:32Z
2020-05-27T18:28:32Z
2020-05-27T18:34:13Z
DOC: "Setting layouts" plots' label size GH34305
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index 451ddf046416e..a813348a342c2 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -1332,7 +1332,7 @@ otherwise you will see a warning. .. ipython:: python - fig, axes = plt.subplots(4, 4, figsize=(6, 6)) + fig, axes = plt.subplots(4, 4, figsize=(9, 9)) plt.subplots_adjust(wspace=0.5, hspace=0.5) target1 = [axes[0][0], axes[1][1], axes[2][2], axes[3][3]] target2 = [axes[3][0], axes[2][1], axes[1][2], axes[0][3]] @@ -1369,6 +1369,7 @@ Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a .. ipython:: python fig, axes = plt.subplots(nrows=2, ncols=2) + plt.subplots_adjust(wspace=0.2, hspace=0.5) df['A'].plot(ax=axes[0, 0]); axes[0, 0].set_title('A'); df['B'].plot(ax=axes[0, 1]);
- [X] closes #34305 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34394
2020-05-26T20:19:42Z
2020-06-07T21:23:10Z
2020-06-07T21:23:10Z
2020-06-07T21:23:16Z
DOC: add a semicolon to hide long matplotlib axes return value
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index 451ddf046416e..814627043cfc8 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -821,7 +821,7 @@ You can create a scatter plot matrix using the df = pd.DataFrame(np.random.randn(1000, 4), columns=['a', 'b', 'c', 'd']) @savefig scatter_matrix_kde.png - scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde') + scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde'); .. ipython:: python :suppress: @@ -1683,4 +1683,4 @@ to generate the plots. Some libraries implementing a backend for pandas are list on the ecosystem :ref:`ecosystem.visualization` page. Developers guide can be found at -https://dev.pandas.io/docs/development/extending.html#plotting-backends \ No newline at end of file +https://pandas.pydata.org/docs/dev/development/extending.html#plotting-backends
add a semicolon to hide debug messages like: Out[85]: array([[<matplotlib.axes._subplots.AxesSubplot object at 0x7f3d0ad6e3d0>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d0ad4a350>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d0aa10250>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d0a6367d0>], [<matplotlib.axes._subplots.AxesSubplot object at 0x7f3d088c2d50>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d089ea310>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d08d89890>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d0a76ae10>], [<matplotlib.axes._subplots.AxesSubplot object at 0x7f3d0a756090>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d0a8dd750>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d096aaed0>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d0937d490>], [<matplotlib.axes._subplots.AxesSubplot object at 0x7f3d09f49a10>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d0a0551d0>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d0a418550>, <matplotlib.axes._subplots.AxesSubplot object at 0x7f3d0a6f0ad0>]], dtype=object) which you can see here: https://pandas.pydata.org/docs/user_guide/visualization.html - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34393
2020-05-26T18:51:37Z
2020-05-28T20:24:31Z
2020-05-28T20:24:31Z
2020-05-28T21:17:00Z
ENH: Set index of returned dataframe in DatetimeIndex.isocalendar
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index a4e458032b787..5351c3ee6b624 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -793,6 +793,7 @@ You may obtain the year, week and day components of the ISO year from the ISO 86 .. ipython:: python idx = pd.date_range(start='2019-12-29', freq='D', periods=4) + idx.isocalendar() idx.to_series().dt.isocalendar() .. _timeseries.offsets: diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index c33cd505d0948..8b141965bbfe1 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -209,7 +209,7 @@ Other enhancements - :class:`Series.str` now has a `fullmatch` method that matches a regular expression against the entire string in each row of the series, similar to `re.fullmatch` (:issue:`32806`). - :meth:`DataFrame.sample` will now also allow array-like and BitGenerator objects to be passed to ``random_state`` as seeds (:issue:`32503`) - :meth:`MultiIndex.union` will now raise `RuntimeWarning` if the object inside are unsortable, pass `sort=False` to suppress this warning (:issue:`33015`) -- :class:`Series.dt` and :class:`DatatimeIndex` now have an `isocalendar` method that returns a :class:`DataFrame` with year, week, and day calculated according to the ISO 8601 calendar (:issue:`33206`). +- :class:`Series.dt` and :class:`DatatimeIndex` now have an `isocalendar` method that returns a :class:`DataFrame` with year, week, and day calculated according to the ISO 8601 calendar (:issue:`33206`, :issue:`34392`). - The :meth:`DataFrame.to_feather` method now supports additional keyword arguments (e.g. to set the compression) that are added in pyarrow 0.17 (:issue:`33422`). 
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 50d792aeb12f4..894a519cb693e 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1272,17 +1272,17 @@ def isocalendar(self): -------- >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4) >>> idx.isocalendar() - year week day - 0 2019 52 7 - 1 2020 1 1 - 2 2020 1 2 - 3 2020 1 3 + year week day + 2019-12-29 2019 52 7 + 2019-12-30 2020 1 1 + 2019-12-31 2020 1 2 + 2020-01-01 2020 1 3 >>> idx.isocalendar().week - 0 52 - 1 1 - 2 1 - 3 1 - Name: week, dtype: UInt32 + 2019-12-29 52 + 2019-12-30 1 + 2019-12-31 1 + 2020-01-01 1 + Freq: D, Name: week, dtype: UInt32 """ from pandas import DataFrame diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 47c50dd2c7b14..9a1e750c5de93 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -66,7 +66,7 @@ def _new_DatetimeIndex(cls, d): @inherit_names( - ["to_period", "to_perioddelta", "to_julian_date", "strftime"] + ["to_period", "to_perioddelta", "to_julian_date", "strftime", "isocalendar"] + DatetimeArray._field_ops + DatetimeArray._datetimelike_methods, DatetimeArray, @@ -90,7 +90,6 @@ def _new_DatetimeIndex(cls, d): "date", "time", "timetz", - "isocalendar", ] + DatetimeArray._bool_ops, DatetimeArray, diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index badf6502aa723..c9367b7e2ee1d 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -10,7 +10,7 @@ from pandas.util._decorators import cache_readonly, doc from pandas.core.dtypes.common import is_dtype_equal, is_object_dtype -from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.arrays import ExtensionArray from pandas.core.indexers import deprecate_ndim_indexing @@ -55,6 +55,8 @@ def fget(self): if wrap: if isinstance(result, type(self._data)): 
return type(self)._simple_new(result, name=self.name) + elif isinstance(result, ABCDataFrame): + return result.set_index(self) return Index(result, name=self.name) return result @@ -77,6 +79,8 @@ def method(self, *args, **kwargs): if wrap: if isinstance(result, type(self._data)): return type(self)._simple_new(result, name=self.name) + elif isinstance(result, ABCDataFrame): + return result.set_index(self) return Index(result, name=self.name) return result diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index 7997247ca0307..b3347b3c64e6c 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -1022,7 +1022,7 @@ def test_groupby_transform_with_datetimes(func, values): dates = pd.date_range("1/1/2011", periods=10, freq="D") stocks = pd.DataFrame({"price": np.arange(10.0)}, index=dates) - stocks["week_id"] = dates.isocalendar().set_index(dates).week + stocks["week_id"] = dates.isocalendar().week result = stocks.groupby(stocks["week_id"])["price"].transform(func) diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index b9373328eb87f..51841727d510b 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -404,6 +404,7 @@ def test_isocalendar_returns_correct_values_close_to_new_year_with_tz(): expected_data_frame = pd.DataFrame( [[2013, 52, 7], [2014, 1, 1], [2014, 1, 2]], columns=["year", "week", "day"], + index=dates, dtype="UInt32", ) tm.assert_frame_equal(result, expected_data_frame)
This is a followup on #33595, (specifically [here](https://github.com/pandas-dev/pandas/pull/33595#issuecomment-614888200) and [here](https://github.com/pandas-dev/pandas/pull/33595#issuecomment-614897268)), where it was noted that `DatetimeIndex.isocalendar` should set the index of the returned dataframe.
https://api.github.com/repos/pandas-dev/pandas/pulls/34392
2020-05-26T18:36:43Z
2020-05-27T12:36:13Z
2020-05-27T12:36:13Z
2021-10-29T14:22:50Z
REF: BusinessDay, BusinessHour to liboffsets
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 4cf8eeb52475b..984c0c702c28d 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1,5 +1,6 @@ import cython +import operator import time from typing import Any import warnings @@ -1163,7 +1164,140 @@ cdef class BusinessMixin(SingleConstructorOffset): BaseOffset.__setstate__(self, state) -cdef class BusinessHourMixin(BusinessMixin): +cdef class BusinessDay(BusinessMixin): + """ + DateOffset subclass representing possibly n business days. + """ + + _prefix = "B" + _attributes = frozenset(["n", "normalize", "offset"]) + + cpdef __setstate__(self, state): + self.n = state.pop("n") + self.normalize = state.pop("normalize") + if "_offset" in state: + self._offset = state.pop("_offset") + elif "offset" in state: + self._offset = state.pop("offset") + + def __reduce__(self): + tup = (self.n, self.normalize, self.offset) + return type(self), tup + + @property + def _params(self): + # FIXME: using cache_readonly breaks a pytables test + return BaseOffset._params.func(self) + + def _offset_str(self) -> str: + def get_str(td): + off_str = "" + if td.days > 0: + off_str += str(td.days) + "D" + if td.seconds > 0: + s = td.seconds + hrs = int(s / 3600) + if hrs != 0: + off_str += str(hrs) + "H" + s -= hrs * 3600 + mts = int(s / 60) + if mts != 0: + off_str += str(mts) + "Min" + s -= mts * 60 + if s != 0: + off_str += str(s) + "s" + if td.microseconds > 0: + off_str += str(td.microseconds) + "us" + return off_str + + if isinstance(self.offset, timedelta): + zero = timedelta(0, 0, 0) + if self.offset >= zero: + off_str = "+" + get_str(self.offset) + else: + off_str = "-" + get_str(-self.offset) + return off_str + else: + return "+" + repr(self.offset) + + @apply_wraps + def apply(self, other): + if isinstance(other, datetime): + n = self.n + wday = other.weekday() + + # avoid slowness below by operating on weeks first + weeks = n // 5 + if n <= 0 and wday > 4: + # 
roll forward + n += 1 + + n -= 5 * weeks + + # n is always >= 0 at this point + if n == 0 and wday > 4: + # roll back + days = 4 - wday + elif wday > 4: + # roll forward + days = (7 - wday) + (n - 1) + elif wday + n <= 4: + # shift by n days without leaving the current week + days = n + else: + # shift by n days plus 2 to get past the weekend + days = n + 2 + + result = other + timedelta(days=7 * weeks + days) + if self.offset: + result = result + self.offset + return result + + elif isinstance(other, (timedelta, Tick)): + return BusinessDay( + self.n, offset=self.offset + other, normalize=self.normalize + ) + else: + raise ApplyTypeError( + "Only know how to combine business day with datetime or timedelta." + ) + + @apply_index_wraps + def apply_index(self, dtindex): + time = dtindex.to_perioddelta("D") + # to_period rolls forward to next BDay; track and + # reduce n where it does when rolling forward + asper = dtindex.to_period("B") + + if self.n > 0: + shifted = (dtindex.to_perioddelta("B") - time).asi8 != 0 + + # Integer-array addition is deprecated, so we use + # _time_shift directly + roll = np.where(shifted, self.n - 1, self.n) + shifted = asper._addsub_int_array(roll, operator.add) + else: + # Integer addition is deprecated, so we use _time_shift directly + roll = self.n + shifted = asper._time_shift(roll) + + result = shifted.to_timestamp() + time + return result + + def is_on_offset(self, dt) -> bool: + if self.normalize and not is_normalized(dt): + return False + return dt.weekday() < 5 + + +cdef class BusinessHour(BusinessMixin): + """ + DateOffset subclass representing possibly n business hours. 
+ """ + + _prefix = "BH" + _anchor = 0 + _attributes = frozenset(["n", "normalize", "start", "end", "offset"]) _adjust_dst = False cdef readonly: @@ -1218,6 +1352,18 @@ cdef class BusinessHourMixin(BusinessMixin): self.start = start self.end = end + cpdef __setstate__(self, state): + start = state.pop("start") + start = (start,) if np.ndim(start) == 0 else tuple(start) + end = state.pop("end") + end = (end,) if np.ndim(end) == 0 else tuple(end) + self.start = start + self.end = end + + state.pop("kwds", {}) + state.pop("next_bday", None) + BusinessMixin.__setstate__(self, state) + def __reduce__(self): return type(self), (self.n, self.normalize, self.start, self.end, self.offset) @@ -1262,6 +1408,267 @@ cdef class BusinessHourMixin(BusinessMixin): ) assert False + @cache_readonly + def next_bday(self): + """ + Used for moving to next business day. + """ + if self.n >= 0: + nb_offset = 1 + else: + nb_offset = -1 + if self._prefix.startswith("C"): + # CustomBusinessHour + from pandas.tseries.offsets import CustomBusinessDay + return CustomBusinessDay( + n=nb_offset, + weekmask=self.weekmask, + holidays=self.holidays, + calendar=self.calendar, + ) + else: + return BusinessDay(n=nb_offset) + + def _next_opening_time(self, other, sign=1): + """ + If self.n and sign have the same sign, return the earliest opening time + later than or equal to current time. + Otherwise the latest opening time earlier than or equal to current + time. + + Opening time always locates on BusinessDay. + However, closing time may not if business hour extends over midnight. + + Parameters + ---------- + other : datetime + Current time. + sign : int, default 1. + Either 1 or -1. Going forward in time if it has the same sign as + self.n. Going backward in time otherwise. + + Returns + ------- + result : datetime + Next opening time. 
+ """ + earliest_start = self.start[0] + latest_start = self.start[-1] + + if not self.next_bday.is_on_offset(other): + # today is not business day + other = other + sign * self.next_bday + if self.n * sign >= 0: + hour, minute = earliest_start.hour, earliest_start.minute + else: + hour, minute = latest_start.hour, latest_start.minute + else: + if self.n * sign >= 0: + if latest_start < other.time(): + # current time is after latest starting time in today + other = other + sign * self.next_bday + hour, minute = earliest_start.hour, earliest_start.minute + else: + # find earliest starting time no earlier than current time + for st in self.start: + if other.time() <= st: + hour, minute = st.hour, st.minute + break + else: + if other.time() < earliest_start: + # current time is before earliest starting time in today + other = other + sign * self.next_bday + hour, minute = latest_start.hour, latest_start.minute + else: + # find latest starting time no later than current time + for st in reversed(self.start): + if other.time() >= st: + hour, minute = st.hour, st.minute + break + + return datetime(other.year, other.month, other.day, hour, minute) + + def _prev_opening_time(self, other): + """ + If n is positive, return the latest opening time earlier than or equal + to current time. + Otherwise the earliest opening time later than or equal to current + time. + + Parameters + ---------- + other : datetime + Current time. + + Returns + ------- + result : datetime + Previous opening time. + """ + return self._next_opening_time(other, sign=-1) + + @apply_wraps + def rollback(self, dt): + """ + Roll provided date backward to next offset only if not on offset. + """ + if not self.is_on_offset(dt): + if self.n >= 0: + dt = self._prev_opening_time(dt) + else: + dt = self._next_opening_time(dt) + return self._get_closing_time(dt) + return dt + + @apply_wraps + def rollforward(self, dt): + """ + Roll provided date forward to next offset only if not on offset. 
+ """ + if not self.is_on_offset(dt): + if self.n >= 0: + return self._next_opening_time(dt) + else: + return self._prev_opening_time(dt) + return dt + + @apply_wraps + def apply(self, other): + if isinstance(other, datetime): + # used for detecting edge condition + nanosecond = getattr(other, "nanosecond", 0) + # reset timezone and nanosecond + # other may be a Timestamp, thus not use replace + other = datetime( + other.year, + other.month, + other.day, + other.hour, + other.minute, + other.second, + other.microsecond, + ) + n = self.n + + # adjust other to reduce number of cases to handle + if n >= 0: + if other.time() in self.end or not self._is_on_offset(other): + other = self._next_opening_time(other) + else: + if other.time() in self.start: + # adjustment to move to previous business day + other = other - timedelta(seconds=1) + if not self._is_on_offset(other): + other = self._next_opening_time(other) + other = self._get_closing_time(other) + + # get total business hours by sec in one business day + businesshours = sum( + self._get_business_hours_by_sec(st, en) + for st, en in zip(self.start, self.end) + ) + + bd, r = divmod(abs(n * 60), businesshours // 60) + if n < 0: + bd, r = -bd, -r + + # adjust by business days first + if bd != 0: + if self._prefix.startswith("C"): + # GH#30593 this is a Custom offset + from pandas.tseries.offsets import CustomBusinessDay + skip_bd = CustomBusinessDay( + n=bd, + weekmask=self.weekmask, + holidays=self.holidays, + calendar=self.calendar, + ) + else: + skip_bd = BusinessDay(n=bd) + # midnight business hour may not on BusinessDay + if not self.next_bday.is_on_offset(other): + prev_open = self._prev_opening_time(other) + remain = other - prev_open + other = prev_open + skip_bd + remain + else: + other = other + skip_bd + + # remaining business hours to adjust + bhour_remain = timedelta(minutes=r) + + if n >= 0: + while bhour_remain != timedelta(0): + # business hour left in this business time interval + bhour = ( + 
self._get_closing_time(self._prev_opening_time(other)) - other + ) + if bhour_remain < bhour: + # finish adjusting if possible + other += bhour_remain + bhour_remain = timedelta(0) + else: + # go to next business time interval + bhour_remain -= bhour + other = self._next_opening_time(other + bhour) + else: + while bhour_remain != timedelta(0): + # business hour left in this business time interval + bhour = self._next_opening_time(other) - other + if ( + bhour_remain > bhour + or bhour_remain == bhour + and nanosecond != 0 + ): + # finish adjusting if possible + other += bhour_remain + bhour_remain = timedelta(0) + else: + # go to next business time interval + bhour_remain -= bhour + other = self._get_closing_time( + self._next_opening_time( + other + bhour - timedelta(seconds=1) + ) + ) + + return other + else: + raise ApplyTypeError("Only know how to combine business hour with datetime") + + def is_on_offset(self, dt): + if self.normalize and not is_normalized(dt): + return False + + if dt.tzinfo is not None: + dt = datetime( + dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond + ) + # Valid BH can be on the different BusinessDay during midnight + # Distinguish by the time spent from previous opening time + return self._is_on_offset(dt) + + def _is_on_offset(self, dt): + """ + Slight speedups using calculated values. 
+ """ + # if self.normalize and not is_normalized(dt): + # return False + # Valid BH can be on the different BusinessDay during midnight + # Distinguish by the time spent from previous opening time + if self.n >= 0: + op = self._prev_opening_time(dt) + else: + op = self._next_opening_time(dt) + span = (dt - op).total_seconds() + businesshours = 0 + for i, st in enumerate(self.start): + if op.hour == st.hour and op.minute == st.minute: + businesshours = self._get_business_hours_by_sec(st, self.end[i]) + if span <= businesshours: + return True + else: + return False + class CustomMixin: """ diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index c3ad48d5ce34d..5cf70229cd5e7 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -17,6 +17,8 @@ BaseOffset, BQuarterBegin, BQuarterEnd, + BusinessDay, + BusinessHour, BusinessMixin, BusinessMonthBegin, BusinessMonthEnd, @@ -204,385 +206,6 @@ def __add__(date): pass -class BusinessDay(BusinessMixin): - """ - DateOffset subclass representing possibly n business days. 
- """ - - _prefix = "B" - _attributes = frozenset(["n", "normalize", "offset"]) - - def __reduce__(self): - tup = (self.n, self.normalize, self.offset) - return type(self), tup - - def _offset_str(self) -> str: - def get_str(td): - off_str = "" - if td.days > 0: - off_str += str(td.days) + "D" - if td.seconds > 0: - s = td.seconds - hrs = int(s / 3600) - if hrs != 0: - off_str += str(hrs) + "H" - s -= hrs * 3600 - mts = int(s / 60) - if mts != 0: - off_str += str(mts) + "Min" - s -= mts * 60 - if s != 0: - off_str += str(s) + "s" - if td.microseconds > 0: - off_str += str(td.microseconds) + "us" - return off_str - - if isinstance(self.offset, timedelta): - zero = timedelta(0, 0, 0) - if self.offset >= zero: - off_str = "+" + get_str(self.offset) - else: - off_str = "-" + get_str(-self.offset) - return off_str - else: - return "+" + repr(self.offset) - - @apply_wraps - def apply(self, other): - if isinstance(other, datetime): - n = self.n - wday = other.weekday() - - # avoid slowness below by operating on weeks first - weeks = n // 5 - if n <= 0 and wday > 4: - # roll forward - n += 1 - - n -= 5 * weeks - - # n is always >= 0 at this point - if n == 0 and wday > 4: - # roll back - days = 4 - wday - elif wday > 4: - # roll forward - days = (7 - wday) + (n - 1) - elif wday + n <= 4: - # shift by n days without leaving the current week - days = n - else: - # shift by n days plus 2 to get past the weekend - days = n + 2 - - result = other + timedelta(days=7 * weeks + days) - if self.offset: - result = result + self.offset - return result - - elif isinstance(other, (timedelta, Tick)): - return BDay(self.n, offset=self.offset + other, normalize=self.normalize) - else: - raise ApplyTypeError( - "Only know how to combine business day with datetime or timedelta." 
- ) - - @apply_index_wraps - def apply_index(self, i): - time = i.to_perioddelta("D") - # to_period rolls forward to next BDay; track and - # reduce n where it does when rolling forward - asper = i.to_period("B") - - if self.n > 0: - shifted = (i.to_perioddelta("B") - time).asi8 != 0 - - # Integer-array addition is deprecated, so we use - # _time_shift directly - roll = np.where(shifted, self.n - 1, self.n) - shifted = asper._addsub_int_array(roll, operator.add) - else: - # Integer addition is deprecated, so we use _time_shift directly - roll = self.n - shifted = asper._time_shift(roll) - - result = shifted.to_timestamp() + time - return result - - def is_on_offset(self, dt: datetime) -> bool: - if self.normalize and not is_normalized(dt): - return False - return dt.weekday() < 5 - - -class BusinessHour(liboffsets.BusinessHourMixin): - """ - DateOffset subclass representing possibly n business hours. - """ - - _prefix = "BH" - _anchor = 0 - _attributes = frozenset(["n", "normalize", "start", "end", "offset"]) - - @cache_readonly - def next_bday(self): - """ - Used for moving to next business day. - """ - if self.n >= 0: - nb_offset = 1 - else: - nb_offset = -1 - if self._prefix.startswith("C"): - # CustomBusinessHour - return CustomBusinessDay( - n=nb_offset, - weekmask=self.weekmask, - holidays=self.holidays, - calendar=self.calendar, - ) - else: - return BusinessDay(n=nb_offset) - - def _next_opening_time(self, other, sign=1): - """ - If self.n and sign have the same sign, return the earliest opening time - later than or equal to current time. - Otherwise the latest opening time earlier than or equal to current - time. - - Opening time always locates on BusinessDay. - However, closing time may not if business hour extends over midnight. - - Parameters - ---------- - other : datetime - Current time. - sign : int, default 1. - Either 1 or -1. Going forward in time if it has the same sign as - self.n. Going backward in time otherwise. 
- - Returns - ------- - result : datetime - Next opening time. - """ - earliest_start = self.start[0] - latest_start = self.start[-1] - - if not self.next_bday.is_on_offset(other): - # today is not business day - other = other + sign * self.next_bday - if self.n * sign >= 0: - hour, minute = earliest_start.hour, earliest_start.minute - else: - hour, minute = latest_start.hour, latest_start.minute - else: - if self.n * sign >= 0: - if latest_start < other.time(): - # current time is after latest starting time in today - other = other + sign * self.next_bday - hour, minute = earliest_start.hour, earliest_start.minute - else: - # find earliest starting time no earlier than current time - for st in self.start: - if other.time() <= st: - hour, minute = st.hour, st.minute - break - else: - if other.time() < earliest_start: - # current time is before earliest starting time in today - other = other + sign * self.next_bday - hour, minute = latest_start.hour, latest_start.minute - else: - # find latest starting time no later than current time - for st in reversed(self.start): - if other.time() >= st: - hour, minute = st.hour, st.minute - break - - return datetime(other.year, other.month, other.day, hour, minute) - - def _prev_opening_time(self, other): - """ - If n is positive, return the latest opening time earlier than or equal - to current time. - Otherwise the earliest opening time later than or equal to current - time. - - Parameters - ---------- - other : datetime - Current time. - - Returns - ------- - result : datetime - Previous opening time. - """ - return self._next_opening_time(other, sign=-1) - - @apply_wraps - def rollback(self, dt): - """ - Roll provided date backward to next offset only if not on offset. 
- """ - if not self.is_on_offset(dt): - if self.n >= 0: - dt = self._prev_opening_time(dt) - else: - dt = self._next_opening_time(dt) - return self._get_closing_time(dt) - return dt - - @apply_wraps - def rollforward(self, dt): - """ - Roll provided date forward to next offset only if not on offset. - """ - if not self.is_on_offset(dt): - if self.n >= 0: - return self._next_opening_time(dt) - else: - return self._prev_opening_time(dt) - return dt - - @apply_wraps - def apply(self, other): - if isinstance(other, datetime): - # used for detecting edge condition - nanosecond = getattr(other, "nanosecond", 0) - # reset timezone and nanosecond - # other may be a Timestamp, thus not use replace - other = datetime( - other.year, - other.month, - other.day, - other.hour, - other.minute, - other.second, - other.microsecond, - ) - n = self.n - - # adjust other to reduce number of cases to handle - if n >= 0: - if other.time() in self.end or not self._is_on_offset(other): - other = self._next_opening_time(other) - else: - if other.time() in self.start: - # adjustment to move to previous business day - other = other - timedelta(seconds=1) - if not self._is_on_offset(other): - other = self._next_opening_time(other) - other = self._get_closing_time(other) - - # get total business hours by sec in one business day - businesshours = sum( - self._get_business_hours_by_sec(st, en) - for st, en in zip(self.start, self.end) - ) - - bd, r = divmod(abs(n * 60), businesshours // 60) - if n < 0: - bd, r = -bd, -r - - # adjust by business days first - if bd != 0: - if isinstance(self, CustomMixin): # GH 30593 - skip_bd = CustomBusinessDay( - n=bd, - weekmask=self.weekmask, - holidays=self.holidays, - calendar=self.calendar, - ) - else: - skip_bd = BusinessDay(n=bd) - # midnight business hour may not on BusinessDay - if not self.next_bday.is_on_offset(other): - prev_open = self._prev_opening_time(other) - remain = other - prev_open - other = prev_open + skip_bd + remain - else: - other = 
other + skip_bd - - # remaining business hours to adjust - bhour_remain = timedelta(minutes=r) - - if n >= 0: - while bhour_remain != timedelta(0): - # business hour left in this business time interval - bhour = ( - self._get_closing_time(self._prev_opening_time(other)) - other - ) - if bhour_remain < bhour: - # finish adjusting if possible - other += bhour_remain - bhour_remain = timedelta(0) - else: - # go to next business time interval - bhour_remain -= bhour - other = self._next_opening_time(other + bhour) - else: - while bhour_remain != timedelta(0): - # business hour left in this business time interval - bhour = self._next_opening_time(other) - other - if ( - bhour_remain > bhour - or bhour_remain == bhour - and nanosecond != 0 - ): - # finish adjusting if possible - other += bhour_remain - bhour_remain = timedelta(0) - else: - # go to next business time interval - bhour_remain -= bhour - other = self._get_closing_time( - self._next_opening_time( - other + bhour - timedelta(seconds=1) - ) - ) - - return other - else: - raise ApplyTypeError("Only know how to combine business hour with datetime") - - def is_on_offset(self, dt): - if self.normalize and not is_normalized(dt): - return False - - if dt.tzinfo is not None: - dt = datetime( - dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond - ) - # Valid BH can be on the different BusinessDay during midnight - # Distinguish by the time spent from previous opening time - return self._is_on_offset(dt) - - def _is_on_offset(self, dt): - """ - Slight speedups using calculated values. 
- """ - # if self.normalize and not is_normalized(dt): - # return False - # Valid BH can be on the different BusinessDay during midnight - # Distinguish by the time spent from previous opening time - if self.n >= 0: - op = self._prev_opening_time(dt) - else: - op = self._next_opening_time(dt) - span = (dt - op).total_seconds() - businesshours = 0 - for i, st in enumerate(self.start): - if op.hour == st.hour and op.minute == st.minute: - businesshours = self._get_business_hours_by_sec(st, self.end[i]) - if span <= businesshours: - return True - else: - return False - - class CustomBusinessDay(CustomMixin, BusinessDay): """ DateOffset subclass representing custom business days excluding holidays. @@ -624,6 +247,11 @@ def __init__( BusinessDay.__init__(self, n, normalize, offset) CustomMixin.__init__(self, weekmask, holidays, calendar) + def __setstate__(self, state): + self.holidays = state.pop("holidays") + self.weekmask = state.pop("weekmask") + super().__setstate__(state) + @apply_wraps def apply(self, other): if self.n <= 0:
https://api.github.com/repos/pandas-dev/pandas/pulls/34391
2020-05-26T18:04:15Z
2020-05-26T22:14:43Z
2020-05-26T22:14:43Z
2020-05-26T22:20:16Z