title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
BUG/CLN: LinePlot uses incorrect xlim when secondary_y=True
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 9e992573f568d..4ef0110013925 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -148,6 +148,11 @@ Performance + +- Bug in line plot doesn't set correct ``xlim`` if ``secondary_y=True`` (:issue:`7459`) + + + Experimental ~~~~~~~~~~~~ diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index dfdd37c468a85..b195e668a731b 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -457,6 +457,20 @@ def test_plot_figsize_and_title(self): self._check_text_labels(ax.title, 'Test') self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8)) + def test_ts_line_lim(self): + ax = self.ts.plot() + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0]) + self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1]) + tm.close() + + ax = self.ts.plot(secondary_y=True) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0]) + self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1]) + def test_ts_area_lim(self): ax = self.ts.plot(kind='area', stacked=False) xmin, xmax = ax.get_xlim() @@ -1091,6 +1105,27 @@ def test_line_area_nan_df(self): self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2) + def test_line_lim(self): + df = DataFrame(rand(6, 3), columns=['x', 'y', 'z']) + ax = df.plot() + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + self.assertEqual(xmin, lines[0].get_data()[0][0]) + self.assertEqual(xmax, lines[0].get_data()[0][-1]) + + ax = df.plot(secondary_y=True) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + self.assertEqual(xmin, lines[0].get_data()[0][0]) + self.assertEqual(xmax, lines[0].get_data()[0][-1]) + + axes = df.plot(secondary_y=True, subplots=True) + for ax in axes: + xmin, xmax = ax.get_xlim() + lines = 
ax.get_lines() + self.assertEqual(xmin, lines[0].get_data()[0][0]) + self.assertEqual(xmax, lines[0].get_data()[0][-1]) + def test_area_lim(self): df = DataFrame(rand(6, 4), columns=['x', 'y', 'z', 'four']) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 7f2f583c5e20e..b09896788a21d 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -909,13 +909,9 @@ def _args_adjust(self): pass def _maybe_right_yaxis(self, ax): - _types = (list, tuple, np.ndarray) - sec_true = isinstance(self.secondary_y, bool) and self.secondary_y - list_sec = isinstance(self.secondary_y, _types) - has_sec = list_sec and len(self.secondary_y) > 0 - all_sec = list_sec and len(self.secondary_y) == self.nseries - - if (sec_true or has_sec) and not hasattr(ax, 'right_ax'): + if hasattr(ax, 'right_ax'): + return ax.right_ax + else: orig_ax, new_ax = ax, ax.twinx() new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle @@ -924,38 +920,25 @@ def _maybe_right_yaxis(self, ax): if len(orig_ax.get_lines()) == 0: # no data on left y orig_ax.get_yaxis().set_visible(False) - - if len(new_ax.get_lines()) == 0: - new_ax.get_yaxis().set_visible(False) - - if sec_true or all_sec: - ax = new_ax - else: - ax.get_yaxis().set_visible(True) - - return ax + return new_ax def _setup_subplots(self): if self.subplots: nrows, ncols = self._get_layout() fig, axes = _subplots(nrows=nrows, ncols=ncols, sharex=self.sharex, sharey=self.sharey, - figsize=self.figsize, ax=self.ax, - secondary_y=self.secondary_y, - data=self.data) + figsize=self.figsize, ax=self.ax) if not com.is_list_like(axes): axes = np.array([axes]) else: if self.ax is None: fig = self.plt.figure(figsize=self.figsize) ax = fig.add_subplot(111) - ax = self._maybe_right_yaxis(ax) else: fig = self.ax.get_figure() if self.figsize is not None: fig.set_size_inches(self.figsize) - ax = self._maybe_right_yaxis(self.ax) - + ax = self.ax axes = [ax] if self.logx or self.loglog: @@ -1182,14 +1165,21 @@ def _get_ax(self, 
i): # get the twinx ax if appropriate if self.subplots: ax = self.axes[i] + + if self.on_right(i): + ax = self._maybe_right_yaxis(ax) + self.axes[i] = ax else: ax = self.axes[0] - if self.on_right(i): - if hasattr(ax, 'right_ax'): - ax = ax.right_ax - elif hasattr(ax, 'left_ax'): - ax = ax.left_ax + if self.on_right(i): + ax = self._maybe_right_yaxis(ax) + + sec_true = isinstance(self.secondary_y, bool) and self.secondary_y + all_sec = (com.is_list_like(self.secondary_y) and + len(self.secondary_y) == self.nseries) + if sec_true or all_sec: + self.axes[0] = ax ax.get_yaxis().set_visible(True) return ax @@ -1550,8 +1540,6 @@ def _make_plot(self): data = self._maybe_convert_index(self.data) self._make_ts_plot(data) else: - from pandas.core.frame import DataFrame - lines = [] x = self._get_xticks(convert_period=True) plotf = self._get_plot_function() @@ -1563,8 +1551,6 @@ def _make_plot(self): kwds = self.kwds.copy() self._maybe_add_color(colors, kwds, style, i) - lines += _get_all_lines(ax) - errors = self._get_errorbars(label=label, index=i) kwds = dict(kwds, **errors) @@ -1588,15 +1574,13 @@ def _make_plot(self): newlines = plotf(*args, **kwds) self._add_legend_handle(newlines[0], label, index=i) - lines.append(newlines[0]) - if self.stacked and not self.subplots: if (y >= 0).all(): self._pos_prior += y elif (y <= 0).all(): self._neg_prior += y - if self._is_datetype(): + lines = _get_all_lines(ax) left, right = _get_xlim(lines) ax.set_xlim(left, right) @@ -2253,14 +2237,7 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None, import matplotlib.pyplot as plt if ax is None and len(plt.get_fignums()) > 0: ax = _gca() - if ax.get_yaxis().get_ticks_position().strip().lower() == 'right': - fig = _gcf() - axes = fig.get_axes() - for i in reversed(range(len(axes))): - ax = axes[i] - ypos = ax.get_yaxis().get_ticks_position().strip().lower() - if ypos == 'left': - break + ax = getattr(ax, 'left_ax', ax) # is there harm in this? 
if label is None: @@ -2890,8 +2867,7 @@ def _get_layout(nplots, layout=None): def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze=True, - subplot_kw=None, ax=None, secondary_y=False, data=None, - **fig_kw): + subplot_kw=None, ax=None, **fig_kw): """Create a figure with a set of subplots already made. This utility wrapper makes it convenient to create common layouts of @@ -2932,12 +2908,6 @@ def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze= ax : Matplotlib axis object, optional - secondary_y : boolean or sequence of ints, default False - If True then y-axis will be on the right - - data : DataFrame, optional - If secondary_y is a sequence, data is used to select columns. - fig_kw : Other keyword arguments to be passed to the figure() call. Note that all keywords not recognized above will be automatically included here. @@ -2993,22 +2963,8 @@ def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze= axarr = np.empty(nplots, dtype=object) - def on_right(i): - if isinstance(secondary_y, bool): - return secondary_y - if isinstance(data, DataFrame): - return data.columns[i] in secondary_y - # Create first subplot separately, so we can share it if requested ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) - if on_right(0): - orig_ax = ax0 - ax0 = ax0.twinx() - ax0._get_lines.color_cycle = orig_ax._get_lines.color_cycle - - orig_ax.get_yaxis().set_visible(False) - orig_ax.right_ax = ax0 - ax0.left_ax = orig_ax if sharex: subplot_kw['sharex'] = ax0 @@ -3020,12 +2976,6 @@ def on_right(i): # convention. 
for i in range(1, nplots): ax = fig.add_subplot(nrows, ncols, i + 1, **subplot_kw) - if on_right(i): - orig_ax = ax - ax = ax.twinx() - ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle - - orig_ax.get_yaxis().set_visible(False) axarr[i] = ax if nplots > 1: diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py index e390607a0e7e2..6031482fd9927 100644 --- a/pandas/tseries/plotting.py +++ b/pandas/tseries/plotting.py @@ -4,11 +4,7 @@ """ #!!! TODO: Use the fact that axis can have units to simplify the process -import datetime as pydt -from datetime import datetime - from matplotlib import pylab -import matplotlib.units as units import numpy as np @@ -22,7 +18,7 @@ from pandas.tseries.converter import (PeriodConverter, TimeSeries_DateLocator, TimeSeries_DateFormatter) -from pandas.tools.plotting import _get_all_lines +from pandas.tools.plotting import _get_all_lines, _get_xlim #---------------------------------------------------------------------- # Plotting functions and monkey patches @@ -222,14 +218,6 @@ def _get_freq(ax, series): return freq -def _get_xlim(lines): - left, right = np.inf, -np.inf - for l in lines: - x = l.get_xdata() - left = min(x[0].ordinal, left) - right = max(x[-1].ordinal, right) - return left, right - # Patch methods for subplot. Only format_dateaxis is currently used. # Do we need the rest for convenience?
`xlim` is not set properly when `secondary_y=True`. This is different issue from #7322. Also, refactored `secondary_y` to be handled only by `MPLPlot`, because it will be required in future fix to pass multiple axes to `df.plot`, like #7069 (I'm willing to work soon..). ``` df = pd.DataFrame(np.random.randn(5, 5), columns=['A', 'B', 'C', 'D', 'E']) df.plot(secondary_y=True) ``` ![figure_1](https://cloud.githubusercontent.com/assets/1696302/3278423/15d4ba78-f3be-11e3-9d8e-172c9d0b754b.png) ``` df = pd.DataFrame(np.random.randn(5, 5), columns=['A', 'B', 'C', 'D', 'E']) df.plot(secondary_y=['A', 'B'], subplots=True) ``` ![figure_2](https://cloud.githubusercontent.com/assets/1696302/3278424/1b2dae44-f3be-11e3-8b8c-b19f5ae7fef3.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/7459
2014-06-14T12:30:28Z
2014-07-07T15:25:12Z
2014-07-07T15:25:12Z
2014-07-09T12:39:10Z
BUG: DTI.intersection doesnt preserve tz
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index cfdef3adb1f34..0c78b00003169 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -232,4 +232,5 @@ Bug Fixes - +- Bug in non-monotonic ``Index.union`` may preserve ``name`` incorrectly (:issue:`7458`) +- Bug in ``DatetimeIndex.intersection`` doesn't preserve timezone (:issue:`4690`) diff --git a/pandas/core/index.py b/pandas/core/index.py index 23837a4bc63b9..2252ba666ca59 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -777,7 +777,8 @@ def take(self, indexer, axis=0): """ indexer = com._ensure_platform_int(indexer) taken = self.view(np.ndarray).take(indexer) - return self._constructor(taken, name=self.name) + return self._simple_new(taken, name=self.name, freq=None, + tz=getattr(self, 'tz', None)) def format(self, name=False, formatter=None, **kwargs): """ @@ -1075,7 +1076,10 @@ def intersection(self, other): # duplicates indexer = self.get_indexer_non_unique(other.values)[0].unique() - return self.take(indexer) + taken = self.take(indexer) + if self.name != other.name: + taken.name = None + return taken def diff(self, other): """ diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 0752ec52c9a1e..f2372f98b330b 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -2,7 +2,6 @@ from datetime import datetime, timedelta from pandas.compat import range, lrange, lzip, u, zip -import sys import operator import pickle import re @@ -447,6 +446,33 @@ def test_intersection(self): # non-iterable input assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5) + idx1 = Index([1, 2, 3, 4, 5], name='idx') + # if target has the same name, it is preserved + idx2 = Index([3, 4, 5, 6, 7], name='idx') + expected2 = Index([3, 4, 5], name='idx') + result2 = idx1.intersection(idx2) + self.assertTrue(result2.equals(expected2)) + self.assertEqual(result2.name, expected2.name) + + # if target name is different, it will be reset + idx3 = 
Index([3, 4, 5, 6, 7], name='other') + expected3 = Index([3, 4, 5], name=None) + result3 = idx1.intersection(idx3) + self.assertTrue(result3.equals(expected3)) + self.assertEqual(result3.name, expected3.name) + + # non monotonic + idx1 = Index([5, 3, 2, 4, 1], name='idx') + idx2 = Index([4, 7, 6, 5, 3], name='idx') + result2 = idx1.intersection(idx2) + self.assertTrue(tm.equalContents(result2, expected2)) + self.assertEqual(result2.name, expected2.name) + + idx3 = Index([4, 7, 6, 5, 3], name='other') + result3 = idx1.intersection(idx3) + self.assertTrue(tm.equalContents(result3, expected3)) + self.assertEqual(result3.name, expected3.name) + def test_union(self): first = self.strIndex[5:20] second = self.strIndex[:10] diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 7f0e00105bba5..87c1742c54b01 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -900,9 +900,7 @@ def take(self, indices, axis=0): maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices)) if isinstance(maybe_slice, slice): return self[maybe_slice] - indices = com._ensure_platform_int(indices) - taken = self.values.take(indices, axis=axis) - return self._simple_new(taken, self.name, None, self.tz) + return super(DatetimeIndex, self).take(indices, axis) def unique(self): """ @@ -1125,6 +1123,12 @@ def __array_finalize__(self, obj): self.name = getattr(obj, 'name', None) self._reset_identity() + def _wrap_union_result(self, other, result): + name = self.name if self.name == other.name else None + if self.tz != other.tz: + raise ValueError('Passed item and index have different timezone') + return self._simple_new(result, name=name, freq=None, tz=self.tz) + def intersection(self, other): """ Specialized intersection for DatetimeIndex objects. 
May be much faster diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 4dc9ff88b328a..31785bb7a6753 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -1133,10 +1133,7 @@ def take(self, indices, axis=None): """ indices = com._ensure_platform_int(indices) taken = self.values.take(indices, axis=axis) - taken = taken.view(PeriodIndex) - taken.freq = self.freq - taken.name = self.name - return taken + return self._simple_new(taken, self.name, freq=self.freq) def append(self, other): """ diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 38887ede2faca..d58621b320a84 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -2070,14 +2070,19 @@ def test_iteration(self): self.assertEqual(result[0].freq, index.freq) def test_take(self): - index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D') + index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D', name='idx') + expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7), + datetime(2010, 1, 9), datetime(2010, 1, 13)], + freq='D', name='idx') - taken = index.take([5, 6, 8, 12]) + taken1 = index.take([5, 6, 8, 12]) taken2 = index[[5, 6, 8, 12]] - tm.assert_isinstance(taken, PeriodIndex) - self.assertEqual(taken.freq, index.freq) - tm.assert_isinstance(taken2, PeriodIndex) - self.assertEqual(taken2.freq, index.freq) + + for taken in [taken1, taken2]: + self.assertTrue(taken.equals(expected)) + tm.assert_isinstance(taken, PeriodIndex) + self.assertEqual(taken.freq, index.freq) + self.assertEqual(taken.name, expected.name) def test_joins(self): index = period_range('1/1/2000', '1/20/2000', freq='D') diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 62b43cc0b189a..b04747665480e 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2467,6 +2467,25 @@ def test_delete_slice(self): 
self.assertEqual(result.name, expected.name) self.assertEqual(result.freq, expected.freq) + def test_take(self): + dates = [datetime(2010, 1, 6), datetime(2010, 1, 7), + datetime(2010, 1, 9), datetime(2010, 1, 13)] + + for tz in [None, 'US/Eastern', 'Asia/Tokyo']: + idx = DatetimeIndex(start='1/1/10', end='12/31/12', + freq='D', tz=tz, name='idx') + expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz) + + taken1 = idx.take([5, 6, 8, 12]) + taken2 = idx[[5, 6, 8, 12]] + + for taken in [taken1, taken2]: + self.assertTrue(taken.equals(expected)) + tm.assert_isinstance(taken, DatetimeIndex) + self.assertIsNone(taken.freq) + self.assertEqual(taken.tz, expected.tz) + self.assertEqual(taken.name, expected.name) + def test_map_bug_1677(self): index = DatetimeIndex(['2012-04-25 09:30:00.393000']) f = index.asof @@ -3035,14 +3054,46 @@ def test_union(self): self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]')) def test_intersection(self): - rng = date_range('6/1/2000', '6/15/2000', freq='D') - rng = rng.delete(5) - - rng2 = date_range('5/15/2000', '6/20/2000', freq='D') - rng2 = DatetimeIndex(rng2.values) - - result = rng.intersection(rng2) - self.assertTrue(result.equals(rng)) + # GH 4690 (with tz) + for tz in [None, 'Asia/Tokyo']: + rng = date_range('6/1/2000', '6/30/2000', freq='D', name='idx') + + # if target has the same name, it is preserved + rng2 = date_range('5/15/2000', '6/20/2000', freq='D', name='idx') + expected2 = date_range('6/1/2000', '6/20/2000', freq='D', name='idx') + + # if target name is different, it will be reset + rng3 = date_range('5/15/2000', '6/20/2000', freq='D', name='other') + expected3 = date_range('6/1/2000', '6/20/2000', freq='D', name=None) + + result2 = rng.intersection(rng2) + result3 = rng.intersection(rng3) + for (result, expected) in [(result2, expected2), (result3, expected3)]: + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freq, expected.freq) + 
self.assertEqual(result.tz, expected.tz) + + # non-monotonic + rng = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-02', '2011-01-03'], + tz=tz, name='idx') + + rng2 = DatetimeIndex(['2011-01-04', '2011-01-02', '2011-02-02', '2011-02-03'], + tz=tz, name='idx') + expected2 = DatetimeIndex(['2011-01-04', '2011-01-02'], tz=tz, name='idx') + + rng3 = DatetimeIndex(['2011-01-04', '2011-01-02', '2011-02-02', '2011-02-03'], + tz=tz, name='other') + expected3 = DatetimeIndex(['2011-01-04', '2011-01-02'], tz=tz, name=None) + + result2 = rng.intersection(rng2) + result3 = rng.intersection(rng3) + for (result, expected) in [(result2, expected2), (result3, expected3)]: + print(result, expected) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertIsNone(result.freq) + self.assertEqual(result.tz, expected.tz) # empty same freq GH2129 rng = date_range('6/1/2000', '6/15/2000', freq='T') @@ -3571,26 +3622,39 @@ def test_shift(self): self.assertRaises(ValueError, idx.shift, 1) def test_setops_preserve_freq(self): - rng = date_range('1/1/2000', '1/1/2002') - - result = rng[:50].union(rng[50:100]) - self.assertEqual(result.freq, rng.freq) - - result = rng[:50].union(rng[30:100]) - self.assertEqual(result.freq, rng.freq) - - result = rng[:50].union(rng[60:100]) - self.assertIsNone(result.freq) - - result = rng[:50].intersection(rng[25:75]) - self.assertEqual(result.freqstr, 'D') - - nofreq = DatetimeIndex(list(rng[25:75])) - result = rng[:50].union(nofreq) - self.assertEqual(result.freq, rng.freq) - - result = rng[:50].intersection(nofreq) - self.assertEqual(result.freq, rng.freq) + for tz in [None, 'Asia/Tokyo', 'US/Eastern']: + rng = date_range('1/1/2000', '1/1/2002', name='idx', tz=tz) + + result = rng[:50].union(rng[50:100]) + self.assertEqual(result.name, rng.name) + self.assertEqual(result.freq, rng.freq) + self.assertEqual(result.tz, rng.tz) + + result = rng[:50].union(rng[30:100]) + self.assertEqual(result.name, rng.name) 
+ self.assertEqual(result.freq, rng.freq) + self.assertEqual(result.tz, rng.tz) + + result = rng[:50].union(rng[60:100]) + self.assertEqual(result.name, rng.name) + self.assertIsNone(result.freq) + self.assertEqual(result.tz, rng.tz) + + result = rng[:50].intersection(rng[25:75]) + self.assertEqual(result.name, rng.name) + self.assertEqual(result.freqstr, 'D') + self.assertEqual(result.tz, rng.tz) + + nofreq = DatetimeIndex(list(rng[25:75]), name='other') + result = rng[:50].union(nofreq) + self.assertIsNone(result.name) + self.assertEqual(result.freq, rng.freq) + self.assertEqual(result.tz, rng.tz) + + result = rng[:50].intersection(nofreq) + self.assertIsNone(result.name) + self.assertEqual(result.freq, rng.freq) + self.assertEqual(result.tz, rng.tz) def test_min_max(self): rng = date_range('1/1/2000', '12/31/2000')
Closes #4690. Also, found and fixed a bug which non-monotonic `Index.union` incorrectly preserves `name` when `Index` have different names. ``` # monotonic idx1 = pd.Index([1, 2, 3, 4, 5], name='idx1') idx2 = pd.Index([4, 5, 6, 7, 8], name='other') idx1.intersection(idx2).name # None (Expected) # non-monotonic idx1 = pd.Index([5, 4, 3, 2, 1], name='idx1') idx2 = pd.Index([4, 5, 6, 7, 8], name='other') idx1.intersection(idx2).name # idx1 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7458
2014-06-14T12:02:01Z
2014-06-14T14:30:36Z
2014-06-14T14:30:36Z
2014-06-14T15:34:31Z
BUG: Better axis label handling for partial layout
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index c41bc13b18606..8beaeb8378f82 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -143,7 +143,7 @@ Performance - +- Bug in subplots displays ``ticklabels`` and ``labels`` in different rule (:issue:`5897`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index c96fd08233238..7e44885f11c2c 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -822,6 +822,12 @@ def test_plot(self): axes = _check_plot_works(df.plot, subplots=True, title='blah') self._check_axes_shape(axes, axes_num=3, layout=(3, 1)) + for ax in axes[:2]: + self._check_visible(ax.get_xticklabels(), visible=False) + self._check_visible([ax.xaxis.get_label()], visible=False) + for ax in [axes[2]]: + self._check_visible(ax.get_xticklabels()) + self._check_visible([ax.xaxis.get_label()]) _check_plot_works(df.plot, title='blah') @@ -2331,8 +2337,16 @@ def test_grouped_box_layout(self): column='height', return_type='dict') self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2)) - box = df.boxplot(column=['height', 'weight', 'category'], by='gender') + # GH 5897 + axes = df.boxplot(column=['height', 'weight', 'category'], by='gender', + return_type='axes') self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2)) + for ax in [axes['height']]: + self._check_visible(ax.get_xticklabels(), visible=False) + self._check_visible([ax.xaxis.get_label()], visible=False) + for ax in [axes['weight'], axes['category']]: + self._check_visible(ax.get_xticklabels()) + self._check_visible([ax.xaxis.get_label()]) box = df.groupby('classroom').boxplot( column=['height', 'weight', 'category'], return_type='dict') diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 37a982acc0bbd..c21f243a9a716 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -3022,15 +3022,16 @@ def on_right(i): if nplots > 1: if sharex and nrows > 1: - for i, 
ax in enumerate(axarr): - if np.ceil(float(i + 1) / ncols) < nrows: # only last row - [label.set_visible( - False) for label in ax.get_xticklabels()] + for ax in axarr[:naxes][:-ncols]: # only bottom row + for label in ax.get_xticklabels(): + label.set_visible(False) + ax.xaxis.get_label().set_visible(False) if sharey and ncols > 1: for i, ax in enumerate(axarr): if (i % ncols) != 0: # only first column - [label.set_visible( - False) for label in ax.get_yticklabels()] + for label in ax.get_yticklabels(): + label.set_visible(False) + ax.yaxis.get_label().set_visible(False) if naxes != nplots: for ax in axarr[naxes:]:
Closes #5897. Handle `ticklabels` and `labels` based on below rules. Currently `labels` are always displayed even if `ticklabels` are hidden and causes confusion. - If `sharex` is `True`, display only most bottom `xticklabels` on each columns. (Because #7035 hides the bottom-right axes). Hide `xlabel` as the same manner as `xticklabels` - If `sharey` is True, display most left `yticklabels` (no change). Hide `ylabel` as the same manner as `yticklabels` (changed) ``` import pandas as pd from numpy.random import randn import matplotlib.pyplot as plt d = pd.DataFrame({'one':randn(5), 'two':randn(5), 'three':randn(5), 'label':['label'] * 5}, columns = ['one','two','three', 'label']) bp= d.boxplot(by='label', rot=45) ``` ![figure_1](https://cloud.githubusercontent.com/assets/1696302/3074667/70d267ea-e34c-11e3-8996-24db3e31ed67.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/7457
2014-06-14T11:21:40Z
2014-07-01T15:28:58Z
2014-07-01T15:28:58Z
2014-09-18T20:28:41Z
FIX integer column names for older sqlalchemy version GH6340, GH7330
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 0ac4b5f3fcc2b..bb6f9cee5766e 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -567,7 +567,7 @@ def insert(self): ins = self.insert_statement() data_list = [] temp = self.insert_data() - keys = temp.columns + keys = list(map(str, temp.columns)) for t in temp.itertuples(): data = dict((k, self.maybe_asscalar(v))
Closes #7330. Should enable to merge #7022 (#6340).
https://api.github.com/repos/pandas-dev/pandas/pulls/7456
2014-06-14T10:42:52Z
2014-06-14T11:02:08Z
2014-06-14T11:02:07Z
2014-08-21T07:58:27Z
DOC: Add how to calculate month-start to cookbook
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 2548f2d88c5d9..b16c71bffd64d 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -269,6 +269,13 @@ Turn a matrix with hours in columns and days in rows into a continuous row seque `Dealing with duplicates when reindexing a timeseries to a specified frequency <http://stackoverflow.com/questions/22244383/pandas-df-refill-adding-two-columns-of-different-shape>`__ +Calculate the first day of the month for each entry in a DatetimeIndex + +.. ipython:: python + + dates = pd.date_range('2000-01-01', periods=5) + dates.to_period(freq='M').to_timestamp() + .. _cookbook.resample: Resampling
Related: #7449
https://api.github.com/repos/pandas-dev/pandas/pulls/7455
2014-06-14T05:31:07Z
2014-06-14T13:02:05Z
2014-06-14T13:02:05Z
2014-06-14T23:01:42Z
ENH/API: offsets funcs now accepts datetime64
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index cfdef3adb1f34..983dbb61c1288 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -126,7 +126,7 @@ Enhancements - +- All offsets ``apply``, ``rollforward`` and ``rollback`` can now handle ``np.datetime64``, previously results in ``ApplyTypeError`` (:issue:`7452`) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 9cbef50f2d82f..91ae91e92f3c3 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -45,6 +45,8 @@ def wrapper(self, other): return tslib.NaT if type(other) == date: other = datetime(other.year, other.month, other.day) + elif isinstance(other, np.datetime64): + other = as_timestamp(other) result = func(self, other) @@ -555,20 +557,6 @@ def apply(self, other): return as_timestamp(result) - elif isinstance(other, np.datetime64): - date_in = other - np_day = date_in.astype('datetime64[D]') - np_time = date_in - np_day - - np_incr_dt = np.busday_offset(np_day, self.n, roll=roll, - busdaycal=self.busdaycalendar) - result = np_incr_dt + np_time - - if self.offset: - result = result + self.offset - - return as_timestamp(result) - elif isinstance(other, (timedelta, Tick)): return BDay(self.n, offset=self.offset + other, normalize=self.normalize) diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index 4fc7d281bc473..ac7a8ae410429 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -222,19 +222,20 @@ def _check_nanofunc_works(self, offset, funcname, dt, expected): self.assertEqual(func(t1), expected) def test_apply(self): - dt = datetime(2011, 1, 1, 9, 0) + sdt = datetime(2011, 1, 1, 9, 0) + ndt = np.datetime64('2011-01-01 09:00Z') for offset in self.offset_types: - expected = self.expecteds[offset.__name__] - - if offset == Nano: - self._check_nanofunc_works(offset, 'apply', dt, expected) - else: - self._check_offsetfunc_works(offset, 'apply', dt, expected) + for 
dt in [sdt, ndt]: + expected = self.expecteds[offset.__name__] + if offset == Nano: + self._check_nanofunc_works(offset, 'apply', dt, expected) + else: + self._check_offsetfunc_works(offset, 'apply', dt, expected) - expected = Timestamp(expected.date()) - self._check_offsetfunc_works(offset, 'apply', dt, expected, - normalize=True) + expected = Timestamp(expected.date()) + self._check_offsetfunc_works(offset, 'apply', dt, expected, + normalize=True) def test_rollforward(self): expecteds = self.expecteds.copy() @@ -261,17 +262,19 @@ def test_rollforward(self): 'Micro': Timestamp('2011-01-01 00:00:00')} norm_expected.update(normalized) - dt = datetime(2011, 1, 1, 9, 0) - for offset in self.offset_types: - expected = expecteds[offset.__name__] + sdt = datetime(2011, 1, 1, 9, 0) + ndt = np.datetime64('2011-01-01 09:00Z') - if offset == Nano: - self._check_nanofunc_works(offset, 'rollforward', dt, expected) - else: - self._check_offsetfunc_works(offset, 'rollforward', dt, expected) - expected = norm_expected[offset.__name__] - self._check_offsetfunc_works(offset, 'rollforward', dt, expected, - normalize=True) + for offset in self.offset_types: + for dt in [sdt, ndt]: + expected = expecteds[offset.__name__] + if offset == Nano: + self._check_nanofunc_works(offset, 'rollforward', dt, expected) + else: + self._check_offsetfunc_works(offset, 'rollforward', dt, expected) + expected = norm_expected[offset.__name__] + self._check_offsetfunc_works(offset, 'rollforward', dt, expected, + normalize=True) def test_rollback(self): expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'), @@ -315,18 +318,20 @@ def test_rollback(self): 'Micro': Timestamp('2011-01-01 00:00:00')} norm_expected.update(normalized) - dt = datetime(2011, 1, 1, 9, 0) - for offset in self.offset_types: - expected = expecteds[offset.__name__] + sdt = datetime(2011, 1, 1, 9, 0) + ndt = np.datetime64('2011-01-01 09:00Z') - if offset == Nano: - self._check_nanofunc_works(offset, 'rollback', dt, expected) - 
else: - self._check_offsetfunc_works(offset, 'rollback', dt, expected) + for offset in self.offset_types: + for dt in [sdt, ndt]: + expected = expecteds[offset.__name__] + if offset == Nano: + self._check_nanofunc_works(offset, 'rollback', dt, expected) + else: + self._check_offsetfunc_works(offset, 'rollback', dt, expected) - expected = norm_expected[offset.__name__] - self._check_offsetfunc_works(offset, 'rollback', - dt, expected, normalize=True) + expected = norm_expected[offset.__name__] + self._check_offsetfunc_works(offset, 'rollback', + dt, expected, normalize=True) def test_onOffset(self): diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py index a3d4d4c7d40a5..2b63eeaf99550 100644 --- a/vb_suite/timeseries.py +++ b/vb_suite/timeseries.py @@ -285,14 +285,33 @@ def date_range(start=None, end=None, periods=None, freq=None): setup = common_setup + """ import datetime as dt import pandas as pd +import numpy as np date = dt.datetime(2011,1,1) +dt64 = np.datetime64('2011-01-01 09:00Z') + +day = pd.offsets.Day() +year = pd.offsets.YearBegin() cday = pd.offsets.CustomBusinessDay() cme = pd.offsets.CustomBusinessMonthEnd() """ +timeseries_day_incr = Benchmark("date + day",setup) + +timeseries_day_apply = Benchmark("day.apply(date)",setup) + +timeseries_year_incr = Benchmark("date + year",setup) + +timeseries_year_apply = Benchmark("year.apply(date)",setup) + timeseries_custom_bday_incr = \ Benchmark("date + cday",setup) +timeseries_custom_bday_apply = \ + Benchmark("cday.apply(date)",setup) + +timeseries_custom_bday_apply_dt64 = \ + Benchmark("cday.apply(dt64)",setup) + # Increment by n timeseries_custom_bday_incr_n = \ Benchmark("date + 10 * cday",setup)
Even though `CustomBusinessDay.apply` can handle `np.datetime64`, most of other offsets cannot accept `datetime64` and raises `ApplyTypeError`. The fix allows all offsets `apply`, `rollforward` and `rollback` to handle `np.datetime64` properly. ``` import pandas as pd import numpy as np t = np.datetime64('2011-01-01 09:00Z') cday = pd.offsets.CustomBusinessDay() cday.apply(t) #2011-01-02 09:00:00 day = pd.offsets.Day() day.apply(t) # pandas.tseries.offsets.ApplyTypeError: Unhandled type: datetime64 ``` **NOTE:** `CustomBusinessDay` had separate logic for `datetime` and `np.datetime64`. Based on the comparison using current master, `np.datetime64` logic looks slower. Thus I removed it. ``` import timeit setup = """ import pandas as pd import numpy as np cday = pd.offsets.CustomBusinessDay() np_dt64 = [np.datetime64('2014-05-{0:02} 09:00Z'.format(i)) for i in range(1, 31)] timestamps = [pd.Timestamp('2014-05-{0:02} 09:00Z'.format(i)) for i in range(1, 31)] """ t = timeit.Timer('[cday.apply(d) for d in np_dt64]', setup) print t.timeit(1000) #1.6253619194 t = timeit.Timer('[cday.apply(d) for d in timestamps]', setup) print t.timeit(1000) #0.959406137466 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7452
2014-06-13T21:58:09Z
2014-06-14T19:01:04Z
2014-06-14T19:01:04Z
2014-06-14T21:52:32Z
TST/CLN: centralize module check funcs
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py index 3ea40f80a822c..dd91952cf537c 100644 --- a/pandas/stats/tests/test_moments.py +++ b/pandas/stats/tests/test_moments.py @@ -17,11 +17,6 @@ N, K = 100, 10 -def _skip_if_no_scipy(): - try: - import scipy.stats - except ImportError: - raise nose.SkipTest("no scipy.stats") class TestMoments(tm.TestCase): @@ -68,7 +63,7 @@ def test_rolling_mean(self): self._check_moment_func(mom.rolling_mean, np.mean) def test_cmov_mean(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() try: from scikits.timeseries.lib import cmov_mean except ImportError: @@ -86,7 +81,7 @@ def test_cmov_mean(self): assert_series_equal(xp, rs) def test_cmov_window(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() try: from scikits.timeseries.lib import cmov_window except ImportError: @@ -104,7 +99,7 @@ def test_cmov_window(self): assert_series_equal(xp, rs) def test_cmov_window_corner(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() try: from scikits.timeseries.lib import cmov_window except ImportError: @@ -128,7 +123,7 @@ def test_cmov_window_corner(self): self.assertEqual(len(rs), 5) def test_cmov_window_frame(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() try: from scikits.timeseries.lib import cmov_window except ImportError: @@ -141,7 +136,7 @@ def test_cmov_window_frame(self): assert_frame_equal(DataFrame(xp), rs) def test_cmov_window_na_min_periods(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() try: from scikits.timeseries.lib import cmov_window except ImportError: @@ -158,7 +153,7 @@ def test_cmov_window_na_min_periods(self): assert_series_equal(xp, rs) def test_cmov_window_regular(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() try: from scikits.timeseries.lib import cmov_window except ImportError: @@ -174,7 +169,7 @@ def test_cmov_window_regular(self): assert_series_equal(Series(xp), rs) def test_cmov_window_special(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() try: 
from scikits.timeseries.lib import cmov_window except ImportError: diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 456d331156011..884a2c1a1ae8e 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -86,18 +86,6 @@ def skip_if_np_version_under1p7(): raise nose.SkipTest('numpy >= 1.7 required') -def _skip_if_no_pytz(): - try: - import pytz - except ImportError: - raise nose.SkipTest("pytz not installed") - -def _skip_if_no_dateutil(): - try: - import dateutil - except ImportError: - raise nose.SkipTest("dateutil not installed") - class TestDataFrameFormatting(tm.TestCase): _multiprocess_can_split_ = True @@ -2930,7 +2918,7 @@ def test_no_tz(self): self.assertEqual(str(ts_nanos_micros), "1970-01-01 00:00:00.000001200") def test_tz_pytz(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() import pytz @@ -2944,7 +2932,7 @@ def test_tz_pytz(self): self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us))) def test_tz_dateutil(self): - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() import dateutil utc = dateutil.tz.tzutc() diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 6848b130dee3a..f35f64e35b330 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -52,12 +52,6 @@ from numpy.testing.decorators import slow -def _skip_if_no_scipy(): - try: - import scipy.stats - except ImportError: - raise nose.SkipTest("no scipy.stats module") - #--------------------------------------------------------------------- # DataFrame test cases @@ -6739,28 +6733,28 @@ def _check_method(self, method='pearson', check_minp=False): expected.ix['A', 'B'] = expected.ix['B', 'A'] = nan def test_corr_pearson(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() self.frame['A'][:5] = nan self.frame['B'][5:10] = nan self._check_method('pearson') def test_corr_kendall(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() self.frame['A'][:5] = nan self.frame['B'][5:10] = nan 
self._check_method('kendall') def test_corr_spearman(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() self.frame['A'][:5] = nan self.frame['B'][5:10] = nan self._check_method('spearman') def test_corr_non_numeric(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() self.frame['A'][:5] = nan self.frame['B'][5:10] = nan @@ -6770,7 +6764,7 @@ def test_corr_non_numeric(self): assert_frame_equal(result, expected) def test_corr_nooverlap(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() # nothing in common for meth in ['pearson', 'kendall', 'spearman']: @@ -6783,7 +6777,7 @@ def test_corr_nooverlap(self): self.assertEqual(rs.ix['B', 'B'], 1) def test_corr_constant(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() # constant --> all NA @@ -10957,7 +10951,7 @@ def test_sem(self): nanops._USE_BOTTLENECK = True def test_skew(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() from scipy.stats import skew def alt(x): @@ -10968,7 +10962,7 @@ def alt(x): self._check_stat_op('skew', alt) def test_kurt(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() from scipy.stats import kurtosis @@ -11320,7 +11314,7 @@ def test_cumprod(self): df.cumprod(1) def test_rank(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() from scipy.stats import rankdata self.frame['A'][::2] = np.nan @@ -11412,7 +11406,7 @@ def test_rank2(self): assert_frame_equal(df.rank(), exp) def test_rank_na_option(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() from scipy.stats import rankdata self.frame['A'][::2] = np.nan diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 3cf4cb8bc5809..1a123eda601a2 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -1,9 +1,7 @@ # pylint: disable-msg=E1101,W0612 from datetime import datetime, timedelta -import operator import nose -import copy import numpy as np from numpy import nan import pandas as pd @@ -11,7 +9,6 @@ from pandas import (Index, Series, DataFrame, Panel, isnull, notnull,date_range, 
_np_version_under1p7) from pandas.core.index import Index, MultiIndex -from pandas.tseries.index import Timestamp, DatetimeIndex import pandas.core.common as com @@ -25,13 +22,6 @@ import pandas.util.testing as tm -def _skip_if_no_scipy(): - try: - import scipy.interpolate - except ImportError: - raise nose.SkipTest('scipy.interpolate missing') - - def _skip_if_no_pchip(): try: from scipy.interpolate import pchip_interpolate @@ -491,7 +481,7 @@ def test_interpolate(self): self.assertRaises(ValueError, non_ts.interpolate, method='time') def test_interp_regression(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() _skip_if_no_pchip() ser = Series(np.sort(np.random.uniform(size=100))) @@ -509,7 +499,7 @@ def test_interpolate_corners(self): s = Series([]).interpolate() assert_series_equal(s.interpolate(), s) - _skip_if_no_scipy() + tm._skip_if_no_scipy() s = Series([np.nan, np.nan]) assert_series_equal(s.interpolate(method='polynomial', order=1), s) @@ -544,7 +534,7 @@ def test_nan_interpolate(self): expected = Series([0., 1., 2., 3.]) assert_series_equal(result, expected) - _skip_if_no_scipy() + tm._skip_if_no_scipy() result = s.interpolate(method='polynomial', order=1) assert_series_equal(result, expected) @@ -561,14 +551,14 @@ def test_nan_str_index(self): assert_series_equal(result, expected) def test_interp_quad(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4]) result = sq.interpolate(method='quadratic') expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4]) assert_series_equal(result, expected) def test_interp_scipy_basic(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() s = Series([1, 3, np.nan, 12, np.nan, 25]) # slinear expected = Series([1., 3., 7.5, 12., 18.5, 25.]) @@ -611,7 +601,7 @@ def test_interp_limit(self): def test_interp_all_good(self): # scipy - _skip_if_no_scipy() + tm._skip_if_no_scipy() s = Series([1, 2, 3]) result = s.interpolate(method='polynomial', order=1) 
assert_series_equal(result, s) @@ -629,18 +619,18 @@ def test_interp_multiIndex(self): result = s.interpolate() assert_series_equal(result, expected) - _skip_if_no_scipy() + tm._skip_if_no_scipy() with tm.assertRaises(ValueError): s.interpolate(method='polynomial', order=1) def test_interp_nonmono_raise(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() s = Series([1, np.nan, 3], index=[0, 2, 1]) with tm.assertRaises(ValueError): s.interpolate(method='krogh') def test_interp_datetime64(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3)) result = df.interpolate(method='nearest') expected = Series([1., 1., 3.], index=date_range('1/1/2000', periods=3)) @@ -768,7 +758,7 @@ def test_interp_nan_idx(self): df.interpolate(method='values') def test_interp_various(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7], 'C': [1, 2, 3, 5, 8, 13, 21]}) df = df.set_index('C') @@ -810,7 +800,7 @@ def test_interp_various(self): assert_frame_equal(result, expected) def test_interp_alt_scipy(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7], 'C': [1, 2, 3, 5, 8, 13, 21]}) result = df.interpolate(method='barycentric') @@ -850,7 +840,7 @@ def test_interp_rowwise(self): assert_frame_equal(result, expected) # scipy route - _skip_if_no_scipy() + tm._skip_if_no_scipy() result = df.interpolate(axis=1, method='values') assert_frame_equal(result, expected) @@ -871,7 +861,7 @@ def test_interp_leading_nans(self): expected['B'].loc[3] = -3.75 assert_frame_equal(result, expected) - _skip_if_no_scipy() + tm._skip_if_no_scipy() result = df.interpolate(method='polynomial', order=1) assert_frame_equal(result, expected) @@ -1022,7 +1012,7 @@ def test_describe_objects(self): assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe()) def test_no_order(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() s = 
Series([0, 1, np.nan, 3]) with tm.assertRaises(ValueError): s.interpolate(method='polynomial') @@ -1030,7 +1020,7 @@ def test_no_order(self): s.interpolate(method='spline') def test_spline(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() s = Series([1, 2, np.nan, 4, 5, np.nan, 7]) result = s.interpolate(method='spline', order=1) expected = Series([1., 2., 3., 4., 5., 6., 7.]) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index c49607eef1b42..c96fd08233238 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -28,12 +28,6 @@ import pandas.tools.plotting as plotting -def _skip_if_no_scipy(): - try: - import scipy - except ImportError: - raise nose.SkipTest("no scipy") - def _skip_if_no_scipy_gaussian_kde(): try: import scipy @@ -655,7 +649,7 @@ def test_plot_fails_when_ax_differs_from_figure(self): @slow def test_kde(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() _check_plot_works(self.ts.plot, kind='kde') _check_plot_works(self.ts.plot, kind='density') @@ -664,7 +658,7 @@ def test_kde(self): @slow def test_kde_kwargs(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() from numpy import linspace _check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20)) @@ -674,7 +668,7 @@ def test_kde_kwargs(self): @slow def test_kde_color(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() ax = self.ts.plot(kind='kde', logy=True, color='r') self._check_ax_scales(ax, yaxis='log') @@ -1486,7 +1480,7 @@ def test_boxplot_return_type(self): @slow def test_kde(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() df = DataFrame(randn(100, 4)) ax = _check_plot_works(df.plot, kind='kde') @@ -1584,7 +1578,7 @@ def test_hist_layout(self): @slow def test_scatter(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() df = DataFrame(randn(100, 2)) import 
pandas.tools.plotting as plt diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index d0baa4b1ecad3..255da1af7d11b 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -28,13 +28,6 @@ import pandas.util.testing as tm -def _skip_if_no_scipy(): - try: - import scipy.stats - except ImportError: - raise nose.SkipTest("no scipy.stats") - - class PanelTests(object): panel = None diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index eb2b53dff3879..093954f1d8c1d 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -34,25 +34,6 @@ import pandas.util.testing as tm -def _skip_if_no_scipy(): - try: - import scipy.stats - except ImportError: - raise nose.SkipTest("scipy not installed") - - -def _skip_if_no_pytz(): - try: - import pytz - except ImportError: - raise nose.SkipTest("pytz not installed") - -def _skip_if_no_dateutil(): - try: - import dateutil - except ImportError: - raise nose.SkipTest("dateutil not installed") - #------------------------------------------------------------------------------ # Series test cases @@ -2010,14 +1991,14 @@ def test_sem(self): self.assert_(isnull(result)) def test_skew(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() from scipy.stats import skew alt = lambda x: skew(x, bias=False) self._check_stat_op('skew', alt) def test_kurt(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() from scipy.stats import kurtosis alt = lambda x: kurtosis(x, bias=False) @@ -3786,7 +3767,7 @@ def test_update(self): # df['c'].update(Series(['foo'],index=[0])) ##### def test_corr(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() import scipy.stats as stats @@ -3817,7 +3798,7 @@ def test_corr(self): self.assertAlmostEqual(result, expected) def test_corr_rank(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() import scipy import scipy.stats as stats @@ -4138,7 +4119,7 @@ def test_nsmallest_nlargest(self): assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]]) 
def test_rank(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() from scipy.stats import rankdata self.ts[::2] = np.nan @@ -4639,7 +4620,7 @@ def test_getitem_setitem_datetimeindex(self): assert_series_equal(result, ts) def test_getitem_setitem_datetime_tz_pytz(self): - _skip_if_no_pytz(); + tm._skip_if_no_pytz(); from pytz import timezone as tz from pandas import date_range @@ -4675,7 +4656,7 @@ def test_getitem_setitem_datetime_tz_pytz(self): def test_getitem_setitem_datetime_tz_dateutil(self): - _skip_if_no_dateutil(); + tm._skip_if_no_dateutil(); from dateutil.tz import gettz, tzutc tz = lambda x: tzutc() if x == 'UTC' else gettz(x) # handle special case for utc in dateutil diff --git a/pandas/tests/test_tseries.py b/pandas/tests/test_tseries.py index 66d5dcc72d776..d5f7a536f9fe8 100644 --- a/pandas/tests/test_tseries.py +++ b/pandas/tests/test_tseries.py @@ -8,13 +8,7 @@ from pandas.compat import range, lrange, zip import pandas.lib as lib import pandas.algos as algos -from datetime import datetime -def _skip_if_no_scipy(): - try: - import scipy.stats - except ImportError: - raise nose.SkipTest("scipy not installed") class TestTseriesUtil(tm.TestCase): _multiprocess_can_split_ = True @@ -342,7 +336,7 @@ def test_convert_objects_complex_number(): def test_rank(): - _skip_if_no_scipy() + tm._skip_if_no_scipy() from scipy.stats import rankdata def _check(arr): diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py index 0a732ac7bc7e8..81cf34bbc269b 100644 --- a/pandas/tseries/tests/test_daterange.py +++ b/pandas/tseries/tests/test_daterange.py @@ -11,31 +11,12 @@ from pandas import Timestamp from pandas.tseries.offsets import generate_range from pandas.tseries.index import cdate_range, bdate_range, date_range -import pandas.tseries.tools as tools import pandas.core.datetools as datetools from pandas.util.testing import assertRaisesRegexp import pandas.util.testing as tm -def _skip_if_no_pytz(): - try: - import pytz - 
except ImportError: - raise nose.SkipTest("pytz not installed") - -def _skip_if_no_dateutil(): - try: - import dateutil - except ImportError: - raise nose.SkipTest("dateutil not installed") - - -def _skip_if_no_cday(): - if datetools.cday is None: - raise nose.SkipTest("CustomBusinessDay not available.") - - def _skip_if_windows_python_3(): if sys.version_info > (3,) and sys.platform == 'win32': raise nose.SkipTest("not used on python 3/win32") @@ -56,7 +37,7 @@ def test_generate(self): self.assert_numpy_array_equal(rng1, rng2) def test_generate_cday(self): - _skip_if_no_cday() + tm._skip_if_no_cday() rng1 = list(generate_range(START, END, offset=datetools.cday)) rng2 = list(generate_range(START, END, time_rule='C')) self.assert_numpy_array_equal(rng1, rng2) @@ -298,12 +279,12 @@ def test_summary(self): self.rng[2:2].summary() def test_summary_pytz(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() import pytz bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary() def test_summary_dateutil(self): - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() import dateutil bdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).summary() @@ -372,7 +353,7 @@ def test_range_bug(self): def test_range_tz_pytz(self): # GH 2906 - _skip_if_no_pytz() + tm._skip_if_no_pytz() from pytz import timezone as tz start = datetime(2011, 1, 1, tzinfo=tz('US/Eastern')) @@ -395,11 +376,11 @@ def test_range_tz_pytz(self): def test_range_tz_dateutil(self): # GH 2906 - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() # Use maybe_get_tz to fix filename in tz under dateutil. 
from pandas.tslib import maybe_get_tz tz = lambda x: maybe_get_tz('dateutil/' + x) - + start = datetime(2011, 1, 1, tzinfo=tz('US/Eastern')) end = datetime(2011, 1, 3, tzinfo=tz('US/Eastern')) @@ -419,7 +400,7 @@ def test_range_tz_dateutil(self): self.assert_(dr[2] == end) def test_month_range_union_tz_pytz(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() from pytz import timezone tz = timezone('US/Eastern') @@ -436,7 +417,7 @@ def test_month_range_union_tz_pytz(self): def test_month_range_union_tz_dateutil(self): _skip_if_windows_python_3() - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() from dateutil.tz import gettz as timezone tz = timezone('US/Eastern') @@ -470,7 +451,7 @@ def test_range_closed(self): class TestCustomDateRange(tm.TestCase): def setUp(self): - _skip_if_no_cday() + tm._skip_if_no_cday() self.rng = cdate_range(START, END) def test_constructor(self): @@ -634,12 +615,12 @@ def test_summary(self): self.rng[2:2].summary() def test_summary_pytz(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() import pytz cdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary() def test_summary_dateutil(self): - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() import dateutil cdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).summary() diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index fddfb3e3b4b56..18a66f963cb1c 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -9,7 +9,7 @@ import numpy as np from pandas.core.datetools import ( - bday, BDay, cday, CDay, BQuarterEnd, BMonthEnd, + bday, BDay, CDay, BQuarterEnd, BMonthEnd, CBMonthEnd, CBMonthBegin, BYearEnd, MonthEnd, MonthBegin, BYearBegin, QuarterBegin, BQuarterBegin, BMonthBegin, DateOffset, Week, @@ -42,11 +42,6 @@ def test_monthrange(): assert monthrange(y, m) == calendar.monthrange(y, m) -def _skip_if_no_cday(): - if cday is None: - raise nose.SkipTest("CustomBusinessDay not available.") - - #### ## 
Misc function tests #### @@ -594,7 +589,7 @@ def setUp(self): self.d = datetime(2008, 1, 1) self.nd = np.datetime64('2008-01-01 00:00:00Z') - _skip_if_no_cday() + tm._skip_if_no_cday() self.offset = CDay() self.offset2 = CDay(2) @@ -813,7 +808,7 @@ class CustomBusinessMonthBase(object): def setUp(self): self.d = datetime(2008, 1, 1) - _skip_if_no_cday() + tm._skip_if_no_cday() self.offset = self._object() self.offset2 = self._object(2) diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index 86d162e49bb2f..f0641b6389ebf 100644 --- a/pandas/tseries/tests/test_plotting.py +++ b/pandas/tseries/tests/test_plotting.py @@ -17,19 +17,7 @@ from pandas.util.testing import assert_series_equal, ensure_clean import pandas.util.testing as tm - -def _skip_if_no_scipy(): - try: - import scipy - except ImportError: - raise nose.SkipTest("scipy not installed") - -def _skip_if_no_scipy_gaussian_kde(): - try: - import scipy - from scipy.stats import gaussian_kde - except ImportError: - raise nose.SkipTest("scipy version doesn't support gaussian_kde") +from pandas.tests.test_graphics import _skip_if_no_scipy_gaussian_kde @tm.mplskip @@ -573,7 +561,7 @@ def test_secondary_y_ts(self): @slow def test_secondary_kde(self): - _skip_if_no_scipy() + tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() import matplotlib.pyplot as plt diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index db496a708adbe..7c73933d9b001 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -26,13 +26,6 @@ bday = BDay() -def _skip_if_no_pytz(): - try: - import pytz - except ImportError: - raise nose.SkipTest("pytz not installed") - - class TestResample(tm.TestCase): _multiprocess_can_split_ = True diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index d2cfdff2b003d..9eb8f9b30b957 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ 
b/pandas/tseries/tests/test_timeseries.py @@ -1,7 +1,6 @@ # pylint: disable-msg=E1101,W0612 from datetime import datetime, time, timedelta, date import sys -import os import operator import nose @@ -41,18 +40,6 @@ from numpy.testing.decorators import slow -def _skip_if_no_dateutil(): - try: - import dateutil - except ImportError: - raise nose.SkipTest("dateutil not installed") - -def _skip_if_no_pytz(): - try: - import pytz - except ImportError: - raise nose.SkipTest("pytz not installed") - def _skip_if_has_locale(): import locale lang, _ = locale.getlocale() @@ -402,7 +389,7 @@ def test_date_range_ambiguous_arguments(self): freq='s', periods=10) def test_timestamp_to_datetime(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() rng = date_range('20090415', '20090519', tz='US/Eastern') @@ -412,7 +399,7 @@ def test_timestamp_to_datetime(self): self.assertEqual(stamp.tzinfo, dtval.tzinfo) def test_timestamp_to_datetime_dateutil(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() rng = date_range('20090415', '20090519', tz='dateutil/US/Eastern') @@ -422,7 +409,7 @@ def test_timestamp_to_datetime_dateutil(self): self.assertEqual(stamp.tzinfo, dtval.tzinfo) def test_timestamp_to_datetime_explicit_pytz(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() import pytz rng = date_range('20090415', '20090519', tz=pytz.timezone('US/Eastern')) @@ -434,7 +421,7 @@ def test_timestamp_to_datetime_explicit_pytz(self): def test_timestamp_to_datetime_explicit_dateutil(self): _skip_if_windows_python_3() - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() import dateutil rng = date_range('20090415', '20090519', tz=dateutil.tz.gettz('US/Eastern')) @@ -445,7 +432,7 @@ def test_timestamp_to_datetime_explicit_dateutil(self): self.assertEquals(stamp.tzinfo, dtval.tzinfo) def test_index_convert_to_datetime_array(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() def _check_rng(rng): converted = rng.to_pydatetime() @@ -464,7 +451,7 @@ def _check_rng(rng): _check_rng(rng_utc) def 
test_index_convert_to_datetime_array_explicit_pytz(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() import pytz def _check_rng(rng): @@ -484,7 +471,7 @@ def _check_rng(rng): _check_rng(rng_utc) def test_index_convert_to_datetime_array_dateutil(self): - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() import dateutil def _check_rng(rng): @@ -1515,7 +1502,7 @@ def test_to_period_microsecond(self): self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U')) def test_to_period_tz_pytz(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() from dateutil.tz import tzlocal from pytz import utc as UTC @@ -1546,7 +1533,7 @@ def test_to_period_tz_pytz(self): self.assertTrue(ts.to_period().equals(xp)) def test_to_period_tz_explicit_pytz(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() import pytz from dateutil.tz import tzlocal @@ -1577,7 +1564,7 @@ def test_to_period_tz_explicit_pytz(self): self.assert_(ts.to_period().equals(xp)) def test_to_period_tz_dateutil(self): - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() import dateutil from dateutil.tz import tzlocal @@ -1764,7 +1751,7 @@ def test_append_concat(self): def test_append_concat_tz(self): #GH 2938 - _skip_if_no_pytz() + tm._skip_if_no_pytz() rng = date_range('5/8/2012 1:45', periods=10, freq='5T', tz='US/Eastern') @@ -1787,7 +1774,7 @@ def test_append_concat_tz(self): def test_append_concat_tz_explicit_pytz(self): # GH 2938 - _skip_if_no_pytz() + tm._skip_if_no_pytz() from pytz import timezone as timezone rng = date_range('5/8/2012 1:45', periods=10, freq='5T', @@ -1811,7 +1798,7 @@ def test_append_concat_tz_explicit_pytz(self): def test_append_concat_tz_dateutil(self): # GH 2938 - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() from dateutil.tz import gettz as timezone rng = date_range('5/8/2012 1:45', periods=10, freq='5T', @@ -2013,7 +2000,7 @@ def test_period_resample(self): def test_period_resample_with_local_timezone_pytz(self): # GH5430 - _skip_if_no_pytz() + tm._skip_if_no_pytz() import 
pytz local_timezone = pytz.timezone('America/Los_Angeles') @@ -2034,7 +2021,7 @@ def test_period_resample_with_local_timezone_pytz(self): def test_period_resample_with_local_timezone_dateutil(self): # GH5430 - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() import dateutil local_timezone = 'dateutil/America/Los_Angeles' @@ -2437,7 +2424,7 @@ def test_insert(self): self.assertTrue(result.freq is None) # GH 7299 - _skip_if_no_pytz() + tm._skip_if_no_pytz() import pytz idx = date_range('1/1/2000', periods=3, freq='D', tz='Asia/Tokyo', name='idx') @@ -3244,7 +3231,7 @@ def test_string_index_series_name_converted(self): class TestTimestamp(tm.TestCase): def test_class_ops_pytz(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() from pytz import timezone def compare(x, y): @@ -3256,7 +3243,7 @@ def compare(x, y): compare(Timestamp.today(), datetime.today()) def test_class_ops_dateutil(self): - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() from dateutil.tz import tzutc def compare(x,y): @@ -3370,7 +3357,7 @@ def test_comparison(self): self.assertTrue(other >= val) def test_cant_compare_tz_naive_w_aware(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() # #1404 a = Timestamp('3/12/2012') b = Timestamp('3/12/2012', tz='utc') @@ -3392,7 +3379,7 @@ def test_cant_compare_tz_naive_w_aware(self): self.assertFalse(a.to_pydatetime() == b) def test_cant_compare_tz_naive_w_aware_explicit_pytz(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() from pytz import utc # #1404 a = Timestamp('3/12/2012') @@ -3415,7 +3402,7 @@ def test_cant_compare_tz_naive_w_aware_explicit_pytz(self): self.assertFalse(a.to_pydatetime() == b) def test_cant_compare_tz_naive_w_aware_dateutil(self): - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() from dateutil.tz import tzutc utc = tzutc() # #1404 diff --git a/pandas/tseries/tests/test_timeseries_legacy.py b/pandas/tseries/tests/test_timeseries_legacy.py index 5aab1147a5742..1f811af0e24ba 100644 --- 
a/pandas/tseries/tests/test_timeseries_legacy.py +++ b/pandas/tseries/tests/test_timeseries_legacy.py @@ -2,7 +2,6 @@ from datetime import datetime, time, timedelta import sys import os -import unittest import nose @@ -15,39 +14,19 @@ import pandas.core.datetools as datetools import pandas.tseries.offsets as offsets -import pandas.tseries.frequencies as fmod import pandas as pd from pandas.util.testing import assert_series_equal, assert_almost_equal import pandas.util.testing as tm -from pandas.tslib import NaT, iNaT -import pandas.lib as lib -import pandas.tslib as tslib - -import pandas.index as _index - from pandas.compat import( range, long, StringIO, lrange, lmap, map, zip, cPickle as pickle, product ) from pandas import read_pickle -import pandas.core.datetools as dt from numpy.random import rand -from numpy.testing import assert_array_equal -from pandas.util.testing import assert_frame_equal import pandas.compat as compat from pandas.core.datetools import BDay -import pandas.core.common as com -from pandas import concat - -from numpy.testing.decorators import slow - -def _skip_if_no_pytz(): - try: - import pytz - except ImportError: - raise nose.SkipTest("pytz not installed") # infortunately, too much has changed to handle these legacy pickles # class TestLegacySupport(unittest.TestCase): diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index 0fec3e48c674a..9c374716a84ee 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -2,7 +2,6 @@ from datetime import datetime, timedelta, tzinfo, date import sys import os -import unittest import nose import numpy as np @@ -26,17 +25,6 @@ from pandas import _np_version_under1p7 -def _skip_if_no_pytz(): - try: - import pytz - except ImportError: - raise nose.SkipTest("pytz not installed") - -def _skip_if_no_dateutil(): - try: - import dateutil - except ImportError: - raise nose.SkipTest try: import pytz @@ -73,7 +61,7 @@ class 
TestTimeZoneSupportPytz(tm.TestCase): _multiprocess_can_split_ = True def setUp(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() def tz(self, tz): ''' Construct a timezone object from a string. Overridden in subclass to parameterize tests. ''' @@ -493,7 +481,7 @@ def test_tz_string(self): self.assertTrue(result.equals(expected)) def test_take_dont_lose_meta(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() rng = date_range('1/1/2000', periods=20, tz=self.tzstr('US/Eastern')) result = rng.take(lrange(5)) @@ -759,7 +747,7 @@ class TestTimeZoneSupportDateutil(TestTimeZoneSupportPytz): _multiprocess_can_split_ = True def setUp(self): - _skip_if_no_dateutil() + tm._skip_if_no_dateutil() def tz(self, tz): ''' @@ -816,7 +804,7 @@ class TestTimeZones(tm.TestCase): _multiprocess_can_split_ = True def setUp(self): - _skip_if_no_pytz() + tm._skip_if_no_pytz() def test_index_equals_with_tz(self): left = date_range('1/1/2011', periods=100, freq='H', tz='utc') diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 95854e33e0ede..9c49014a47da7 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -209,6 +209,19 @@ def setUpClass(cls): cls.setUpClass = setUpClass return cls +def _skip_if_no_scipy(): + try: + import scipy.stats + except ImportError: + import nose + raise nose.SkipTest("no scipy.stats module") + try: + import scipy.interpolate + except ImportError: + import nose + raise nose.SkipTest('scipy.interpolate missing') + + def _skip_if_no_pytz(): try: import pytz @@ -216,6 +229,22 @@ def _skip_if_no_pytz(): import nose raise nose.SkipTest("pytz not installed") + +def _skip_if_no_dateutil(): + try: + import dateutil + except ImportError: + import nose + raise nose.SkipTest("dateutil not installed") + + +def _skip_if_no_cday(): + from pandas.core.datetools import cday + if cday is None: + import nose + raise nose.SkipTest("CustomBusinessDay not available.") + + #------------------------------------------------------------------------------ # 
locale utilities
- Moved duplicated module check functions to `util.testing` - Remove some unnecessary import
https://api.github.com/repos/pandas-dev/pandas/pulls/7451
2014-06-13T21:29:07Z
2014-06-21T20:11:43Z
2014-06-21T20:11:43Z
2014-06-21T22:42:21Z
FIX: Enable fixed width strings to be read from Stata 13 (117) files
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index be0b3bc543c39..c3e5e4989139f 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -220,6 +220,8 @@ Bug Fixes - Bug where ``nanops._has_infs`` doesn't work with many dtypes (:issue:`7357`) - Bug in ``StataReader.data`` where reading a 0-observation dta failed (:issue:`7369`) +- Bug in when reading Stata 13 (117) files containing fixed width strings (:issue:`7360`) +- Bug in when writing Stata files where the encoding was ignored (:issue:`7286`) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 7bb466794c44d..ed6b540b890a2 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -512,8 +512,10 @@ def _read_header(self): try: i = 0 for typ in typlist: - if typ <= 2045 or typ == 32768: - self.typlist[i] = None + if typ <= 2045: + self.typlist[i] = typ + elif typ == 32768: + raise ValueError("Long strings are not supported") else: self.typlist[i] = self.TYPE_MAP_XML[typ] i += 1 @@ -1326,7 +1328,10 @@ def _write_data_nodates(self): var = _pad_bytes('', typ) if len(var) < typ: var = _pad_bytes(var, typ) - self._write(var) + if compat.PY3: + self._write(var) + else: + self._write(var.encode(self._encoding)) else: try: self._file.write(struct.pack(byteorder + TYPE_MAP[typ], @@ -1356,7 +1361,10 @@ def _write_data_dates(self): if typ <= 244: # we've got a string if len(var) < typ: var = _pad_bytes(var, typ) - self._write(var) + if compat.PY3: + self._write(var) + else: + self._write(var.encode(self._encoding)) else: self._file.write(struct.pack(byteorder+TYPE_MAP[typ], var)) diff --git a/pandas/io/tests/data/stata5_117.dta b/pandas/io/tests/data/stata5_117.dta new file mode 100644 index 0000000000000..afbd3b0e0afe3 Binary files /dev/null and b/pandas/io/tests/data/stata5_117.dta differ diff --git a/pandas/io/tests/data/stata6_117.dta b/pandas/io/tests/data/stata6_117.dta new file mode 100644 index 0000000000000..bf4f3838e8be7 Binary files /dev/null and 
b/pandas/io/tests/data/stata6_117.dta differ diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index 8221eccc3ebe3..b045867b06263 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -60,11 +60,13 @@ def setUp(self): self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta') self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta') self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta') + self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta') self.csv15 = os.path.join(self.dirpath, 'stata6.csv') self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta') self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta') self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta') + self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta') def read_dta(self, file): return read_stata(file, convert_dates=True) @@ -281,6 +283,11 @@ def test_encoding(self): self.assertEqual(result, expected) self.assertIsInstance(result, unicode) + with tm.ensure_clean() as path: + encoded.to_stata(path,encoding='latin-1', write_index=False) + reread_encoded = read_stata(path, encoding='latin-1') + tm.assert_frame_equal(encoded, reread_encoded) + def test_read_write_dta11(self): original = DataFrame([(1, 2, 3, 4)], columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______']) @@ -354,9 +361,12 @@ def test_read_write_reread_dta14(self): parsed_114.index.name = 'index' parsed_115 = self.read_dta(self.dta14_115) parsed_115.index.name = 'index' + parsed_117 = self.read_dta(self.dta14_117) + parsed_117.index.name = 'index' tm.assert_frame_equal(parsed_114, parsed_113) tm.assert_frame_equal(parsed_114, parsed_115) + tm.assert_frame_equal(parsed_114, parsed_117) with tm.ensure_clean() as path: parsed_114.to_stata(path, {'date_td': 'td'}) @@ -375,10 +385,12 @@ def test_read_write_reread_dta15(self): parsed_113 = self.read_dta(self.dta15_113) parsed_114 = self.read_dta(self.dta15_114) 
parsed_115 = self.read_dta(self.dta15_115) + parsed_117 = self.read_dta(self.dta15_117) tm.assert_frame_equal(expected, parsed_114) tm.assert_frame_equal(parsed_113, parsed_114) tm.assert_frame_equal(parsed_114, parsed_115) + tm.assert_frame_equal(parsed_114, parsed_117) def test_timestamp_and_label(self): original = DataFrame([(1,)], columns=['var'])
Fixes a bug which prevented files containing fixed width string data from being read. Stata 13 files also allow variable length strings, which are not supported in the current version, and an explicit exception regarding this type is now given. Added tests which cover these cases, and Stata 13 format files. fixes #7360
https://api.github.com/repos/pandas-dev/pandas/pulls/7450
2014-06-13T20:23:23Z
2014-06-16T12:51:56Z
2014-06-16T12:51:56Z
2014-06-16T12:52:01Z
Fix bug where ``nanops._has_infs`` doesn't work with many dtypes (issue #7357)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index aacc4ae989611..c92e853ad8317 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -219,6 +219,8 @@ Bug Fixes 1-dimensional ``nan`` arrays (:issue:`7354`) - Bug where ``nanops.nanmedian`` doesn't work when ``axis==None`` (:issue:`7352`) +- Bug where ``nanops._has_infs`` doesn't work with many dtypes + (:issue:`7357`) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 4b78164003eed..c3e1da61330fa 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -105,14 +105,18 @@ def _bn_ok_dtype(dt, name): return True return False + def _has_infs(result): if isinstance(result, np.ndarray): if result.dtype == 'f8': - return lib.has_infs_f8(result) + return lib.has_infs_f8(result.ravel()) elif result.dtype == 'f4': - return lib.has_infs_f4(result) + return lib.has_infs_f4(result.ravel()) + try: + return np.isinf(result).any() + except (TypeError, NotImplementedError) as e: + # if it doesn't support infs, then it can't have infs return False - return np.isinf(result) or np.isneginf(result) def _get_fill_value(dtype, fill_value=None, fill_value_typ=None): @@ -538,18 +542,20 @@ def _maybe_arg_null_out(result, axis, mask, skipna): def _get_counts(mask, axis): - if axis is not None: - count = (mask.shape[axis] - mask.sum(axis)).astype(float) - else: - count = float(mask.size - mask.sum()) + if axis is None: + return float(mask.size - mask.sum()) - return count + count = mask.shape[axis] - mask.sum(axis) + try: + return count.astype(float) + except AttributeError: + return np.array(count, dtype=float) def _maybe_null_out(result, axis, mask): if axis is not None and getattr(result, 'ndim', False): null_mask = (mask.shape[axis] - mask.sum(axis)) == 0 - if null_mask.any(): + if np.any(null_mask): if np.iscomplexobj(result): result = result.astype('c16') else: @@ -638,8 +644,16 @@ def nancov(a, b, min_periods=None): def _ensure_numeric(x): if isinstance(x, np.ndarray): - if x.dtype 
== np.object_: + if x.dtype.kind in ['i', 'b']: x = x.astype(np.float64) + elif x.dtype == np.object_: + try: + x = x.astype(np.complex128) + except: + x = x.astype(np.float64) + else: + if not np.any(x.imag): + x = x.real elif not (com.is_float(x) or com.is_integer(x) or com.is_complex(x)): try: x = float(x) diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 118ccf0141225..417cef92412b1 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -597,25 +597,27 @@ def check_bool(self, func, value, correct, *args, **kwargs): break def test__has_infs(self): - pairs = [('arr_complex_1d', False), - ('arr_int_1d', False), - ('arr_bool_1d', False), - ('arr_str_1d', False), - ('arr_utf_1d', False), - ('arr_complex_1d', False), - ('arr_complex_nan_1d', False), - - ('arr_nan_nanj_1d', False)] - pairs_float = [('arr_float_1d', False), - ('arr_nan_1d', False), - ('arr_float_nan_1d', False), - ('arr_nan_nan_1d', False), - - ('arr_float_inf_1d', True), - ('arr_inf_1d', True), - ('arr_nan_inf_1d', True), - ('arr_float_nan_inf_1d', True), - ('arr_nan_nan_inf_1d', True)] + pairs = [('arr_complex', False), + ('arr_int', False), + ('arr_bool', False), + ('arr_str', False), + ('arr_utf', False), + ('arr_complex', False), + ('arr_complex_nan', False), + + ('arr_nan_nanj', False), + ('arr_nan_infj', True), + ('arr_complex_nan_infj', True)] + pairs_float = [('arr_float', False), + ('arr_nan', False), + ('arr_float_nan', False), + ('arr_nan_nan', False), + + ('arr_float_inf', True), + ('arr_inf', True), + ('arr_nan_inf', True), + ('arr_float_nan_inf', True), + ('arr_nan_nan_inf', True)] for arr, correct in pairs: val = getattr(self, arr) @@ -630,6 +632,7 @@ def test__has_infs(self): try: self.check_bool(nanops._has_infs, val, correct) self.check_bool(nanops._has_infs, val.astype('f4'), correct) + self.check_bool(nanops._has_infs, val.astype('f2'), correct) except BaseException as exc: exc.args += (arr,) raise
Fixes issue #7357, where where `nanops._has_infs` doesn't work with many dtypes
https://api.github.com/repos/pandas-dev/pandas/pulls/7448
2014-06-13T08:49:59Z
2014-06-13T16:42:59Z
2014-06-13T16:42:59Z
2014-06-22T15:29:59Z
WIP: searchsorted implementation
diff --git a/doc/source/api.rst b/doc/source/api.rst index cce15685035d0..4dd055bce0a0a 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -415,6 +415,7 @@ Reshaping, sorting Series.sortlevel Series.swaplevel Series.unstack + Series.searchsorted Combining / joining / merging ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/basics.rst b/doc/source/basics.rst index b32874f5ca7d8..d0094eae16d38 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1230,6 +1230,19 @@ argument: compatibility with NumPy methods which expect the ``ndarray.sort`` behavior. ``Series.order`` returns a copy of the sorted data. +Series has the ``searchsorted`` method, which works similar to +``np.ndarray.searchsorted``. + +.. ipython:: python + + ser = Series([1, 2, 3]) + ser.searchsorted([0, 3]) + ser.searchsorted([0, 4]) + ser.searchsorted([1, 3], side='right') + ser.searchsorted([1, 3], side='left') + ser = Series([3, 1, 2]) + ser.searchsorted([0, 3], sorter=np.argsort(ser)) + .. _basics.nsorted: smallest / largest values diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 1d9acadb68e58..b7d0f9c8f247f 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -821,6 +821,8 @@ Enhancements - ``DataFrame.fillna`` can now accept a ``DataFrame`` as a fill value (:issue:`8377`) +- Added ``searchsorted`` method to ``Series`` objects (:issue:`7447`) + .. _whatsnew_0150.performance: Performance diff --git a/pandas/core/series.py b/pandas/core/series.py index 37f66fc56ea56..24cfe9c54b3d9 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1373,6 +1373,62 @@ def dot(self, other): else: # pragma: no cover raise TypeError('unsupported type: %s' % type(other)) + def searchsorted(self, v, side='left', sorter=None): + """Find indices where elements should be inserted to maintain order. 
+ + Find the indices into a sorted Series `self` such that, if the + corresponding elements in `v` were inserted before the indices, the + order of `self` would be preserved. + + Parameters + ---------- + v : array_like + Values to insert into `a`. + side : {'left', 'right'}, optional + If 'left', the index of the first suitable location found is given. + If 'right', return the last such index. If there is no suitable + index, return either 0 or N (where N is the length of `a`). + sorter : 1-D array_like, optional + Optional array of integer indices that sort `self` into ascending + order. They are typically the result of ``np.argsort``. + + Returns + ------- + indices : array of ints + Array of insertion points with the same shape as `v`. + + See Also + -------- + Series.sort + Series.order + numpy.searchsorted + + Notes + ----- + Binary search is used to find the required insertion points. + + Examples + -------- + >>> x = pd.Series([1, 2, 3]) + >>> x + 0 1 + 1 2 + 2 3 + dtype: int64 + >>> x.searchsorted(4) + array([3]) + >>> x.searchsorted([0, 4]) + array([0, 3]) + >>> x.searchsorted([1, 3], side='left') + array([0, 2]) + >>> x.searchsorted([1, 3], side='right') + array([1, 3]) + >>> x.searchsorted([1, 2], side='right', sorter=[0, 2, 1]) + array([1, 3]) + """ + return self.values.searchsorted(Series(v).values, side=side, + sorter=sorter) + #------------------------------------------------------------------------------ # Combination diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 54040ced71e61..a8599bcda8513 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -5956,7 +5956,6 @@ def test_replace_with_single_list(self): s.replace([1,2,3],inplace=True,method='crash_cymbal') assert_series_equal(s, ser) - def test_replace_mixed_types(self): s = Series(np.arange(5),dtype='int64') @@ -6164,6 +6163,37 @@ def test_concat_empty_series_dtypes(self): self.assertEqual(pd.concat([Series(dtype=np.bool_), 
Series(dtype=np.int32)]).dtype, np.int32) + def test_searchsorted_numeric_dtypes_scalar(self): + s = Series([1, 2, 90, 1000, 3e9]) + r = s.searchsorted(30) + e = 2 + tm.assert_equal(r, e) + + r = s.searchsorted([30]) + e = np.array([2]) + tm.assert_array_equal(r, e) + + def test_searchsorted_numeric_dtypes_vector(self): + s = Series([1, 2, 90, 1000, 3e9]) + r = s.searchsorted([91, 2e6]) + e = np.array([3, 4]) + tm.assert_array_equal(r, e) + + def test_search_sorted_datetime64_scalar(self): + s = Series(pd.date_range('20120101', periods=10, freq='2D')) + v = pd.Timestamp('20120102') + r = s.searchsorted(v) + e = 1 + tm.assert_equal(r, e) + + def test_search_sorted_datetime64_list(self): + s = Series(pd.date_range('20120101', periods=10, freq='2D')) + v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')] + r = s.searchsorted(v) + e = np.array([1, 2]) + tm.assert_array_equal(r, e) + + class TestSeriesNonUnique(tm.TestCase): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 2b775201d9900..977d445f917a8 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -21,6 +21,7 @@ from numpy.random import randn, rand import numpy as np +from numpy.testing import assert_array_equal import pandas as pd from pandas.core.common import _is_sequence, array_equivalent
closes #6712 we've decided to simply return `self.values.searchsorted`, since that's what was happening before, and because dealing with non-monotonic indices is a PITA and yields marginal benefit when you can just use `s.iloc[s.searchsorted(...)]` - [x] ~~`timedelta64` tests~~ #8464 - [x] ~~more edge case testing~~ #8464 - [x] ~~how should non-monotonic indexes be handled (currently raising a `ValueError`)?~~ - [x] ~~`datetime64` (and probably `timedelta64`) coercion needs work~~ - [x] ~~`side` argument testing~~ #8464 - [x] documentation - [x] docstring - [x] add to api.rst - [x] add to basics.rst
https://api.github.com/repos/pandas-dev/pandas/pulls/7447
2014-06-13T03:05:18Z
2014-10-05T01:02:29Z
2014-10-05T01:02:29Z
2014-10-05T01:07:09Z
_sanitize_column now reports proper duplicate error
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5e31b14fa7bd3..06494a7167e64 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1962,6 +1962,8 @@ def _sanitize_column(self, key, value): # GH 4107 try: value = value.reindex(self.index).values + except ValueError as e: + raise e except: raise TypeError('incompatible index of inserted column ' 'with frame index')
closes #7432 A very simple fix for GH7432. `internals.py` (line 3240) actually raises the correct exception (`ValueError: cannot reindex from a duplicate axis`) but the except: doesn't allow it to be raised. Here I've just allowed the exception itself to be raised since it's more useful to the user than the weird `TypeError` message.
https://api.github.com/repos/pandas-dev/pandas/pulls/7442
2014-06-12T16:27:55Z
2014-06-13T19:34:50Z
2014-06-13T19:34:50Z
2014-06-14T10:58:15Z
support axis=None for nanmedian ( issue #7352 )
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 92e19ba43ccb7..aacc4ae989611 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -217,6 +217,8 @@ Bug Fixes (:issue:`7353`) - Bug in several ``nanops`` functions when ``axis==0`` for 1-dimensional ``nan`` arrays (:issue:`7354`) +- Bug where ``nanops.nanmedian`` doesn't work when ``axis==None`` + (:issue:`7352`) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 3be194f9673f3..4b78164003eed 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -286,6 +286,9 @@ def get_median(x): if values.dtype != np.float64: values = values.astype('f8') + if axis is None: + values = values.ravel() + notempty = values.size # an array from a frame diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 47ca288177946..118ccf0141225 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -13,6 +13,8 @@ class TestnanopsDataFrame(tm.TestCase): def setUp(self): + np.random.seed(11235) + self.arr_shape = (11, 7, 5) self.arr_float = np.random.randn(*self.arr_shape) @@ -118,11 +120,38 @@ def check_results(self, targ, res, axis): res = getattr(res, 'values', res) if axis != 0 and hasattr(targ, 'shape') and targ.ndim: res = np.split(res, [targ.shape[0]], axis=0)[0] - tm.assert_almost_equal(targ, res) + try: + tm.assert_almost_equal(targ, res) + except: + # There are sometimes rounding errors with + # complex and object dtypes. + # If it isn't one of those, re-raise the error. 
+ if not hasattr(res, 'dtype') or res.dtype.kind not in ['c', 'O']: + raise + # convert object dtypes to something that can be split into + # real and imaginary parts + if res.dtype.kind == 'O': + if targ.dtype.kind != 'O': + res = res.astype(targ.dtype) + else: + try: + res = res.astype('c16') + except: + res = res.astype('f8') + try: + targ = targ.astype('c16') + except: + targ = targ.astype('f8') + # there should never be a case where numpy returns an object + # but nanops doesn't, so make that an exception + elif targ.dtype.kind == 'O': + raise + tm.assert_almost_equal(targ.real, res.real) + tm.assert_almost_equal(targ.imag, res.imag) def check_fun_data(self, testfunc, targfunc, testarval, targarval, targarnanval, **kwargs): - for axis in list(range(targarval.ndim)): + for axis in list(range(targarval.ndim))+[None]: for skipna in [False, True]: targartempval = targarval if skipna else targarnanval try: @@ -215,6 +244,12 @@ def check_funs(self, testfunc, targfunc, if allow_obj: self.arr_obj = np.vstack(objs) + # some nanops handle object dtypes better than their numpy + # counterparts, so the numpy functions need to be given something + # else + if allow_obj == 'convert': + targfunc = partial(self._badobj_wrap, + func=targfunc, allow_complex=allow_complex) self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs) def check_funs_ddof(self, testfunc, targfunc, @@ -229,6 +264,14 @@ def check_funs_ddof(self, testfunc, targfunc, except BaseException as exc: exc.args += ('ddof %s' % ddof,) + def _badobj_wrap(self, value, func, allow_complex=True, **kwargs): + if value.dtype.kind == 'O': + if allow_complex: + value = value.astype('c16') + else: + value = value.astype('f8') + return func(value, **kwargs) + def test_nanany(self): self.check_funs(nanops.nanany, np.any, allow_all_nan=False, allow_str=False, allow_date=False) @@ -241,36 +284,15 @@ def test_nansum(self): self.check_funs(nanops.nansum, np.sum, allow_str=False, allow_date=False) - def _nanmean_wrap(self, value, 
*args, **kwargs): - dtype = value.dtype - res = nanops.nanmean(value, *args, **kwargs) - if dtype.kind == 'O': - res = np.round(res, decimals=13) - return res - - def _mean_wrap(self, value, *args, **kwargs): - dtype = value.dtype - if dtype.kind == 'O': - value = value.astype('c16') - res = np.mean(value, *args, **kwargs) - if dtype.kind == 'O': - res = np.round(res, decimals=13) - return res - def test_nanmean(self): - self.check_funs(self._nanmean_wrap, self._mean_wrap, + self.check_funs(nanops.nanmean, np.mean, allow_complex=False, allow_obj=False, allow_str=False, allow_date=False) - def _median_wrap(self, value, *args, **kwargs): - if value.dtype.kind == 'O': - value = value.astype('c16') - res = np.median(value, *args, **kwargs) - return res - def test_nanmedian(self): - self.check_funs(nanops.nanmedian, self._median_wrap, - allow_complex=False, allow_str=False, allow_date=False) + self.check_funs(nanops.nanmedian, np.median, + allow_complex=False, allow_str=False, allow_date=False, + allow_obj='convert') def test_nanvar(self): self.check_funs_ddof(nanops.nanvar, np.var,
This fixes #7352, where `nanmedian` does not work when `axis==None`.
https://api.github.com/repos/pandas-dev/pandas/pulls/7440
2014-06-12T13:18:52Z
2014-06-12T22:41:29Z
2014-06-12T22:41:29Z
2014-06-13T13:23:36Z
TST: Move some window/moments tests to test_ewm
diff --git a/pandas/tests/window/moments/test_moments_consistency_ewm.py b/pandas/tests/window/moments/test_moments_consistency_ewm.py index c79d02fd3237e..b41d2ec23a52d 100644 --- a/pandas/tests/window/moments/test_moments_consistency_ewm.py +++ b/pandas/tests/window/moments/test_moments_consistency_ewm.py @@ -18,58 +18,6 @@ def test_ewm_pairwise_cov_corr(func, frame): tm.assert_series_equal(result, expected, check_names=False) -@pytest.mark.parametrize("name", ["cov", "corr"]) -def test_ewm_corr_cov(name): - A = Series(np.random.randn(50), index=np.arange(50)) - B = A[2:] + np.random.randn(48) - - A[:10] = np.NaN - B[-10:] = np.NaN - - result = getattr(A.ewm(com=20, min_periods=5), name)(B) - assert np.isnan(result.values[:14]).all() - assert not np.isnan(result.values[14:]).any() - - -@pytest.mark.parametrize("min_periods", [0, 1, 2]) -@pytest.mark.parametrize("name", ["cov", "corr"]) -def test_ewm_corr_cov_min_periods(name, min_periods): - # GH 7898 - A = Series(np.random.randn(50), index=np.arange(50)) - B = A[2:] + np.random.randn(48) - - A[:10] = np.NaN - B[-10:] = np.NaN - - result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B) - # binary functions (ewmcov, ewmcorr) with bias=False require at - # least two values - assert np.isnan(result.values[:11]).all() - assert not np.isnan(result.values[11:]).any() - - # check series of length 0 - empty = Series([], dtype=np.float64) - result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty) - tm.assert_series_equal(result, empty) - - # check series of length 1 - result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)( - Series([1.0]) - ) - tm.assert_series_equal(result, Series([np.NaN])) - - -@pytest.mark.parametrize("name", ["cov", "corr"]) -def test_different_input_array_raise_exception(name): - A = Series(np.random.randn(50), index=np.arange(50)) - A[:10] = np.NaN - - msg = "other must be a DataFrame or Series" - # exception raised is Exception - with 
pytest.raises(ValueError, match=msg): - getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50)) - - def create_mock_weights(obj, com, adjust, ignore_na): if isinstance(obj, DataFrame): if not len(obj.columns): diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py index a7b1d3fbca3fb..f87ff654e554a 100644 --- a/pandas/tests/window/moments/test_moments_ewm.py +++ b/pandas/tests/window/moments/test_moments_ewm.py @@ -1,4 +1,3 @@ -import numpy as np import pytest from pandas import ( @@ -20,187 +19,6 @@ def test_ewma_frame(frame, name): assert isinstance(frame_result, DataFrame) -def test_ewma_adjust(): - vals = Series(np.zeros(1000)) - vals[5] = 1 - result = vals.ewm(span=100, adjust=False).mean().sum() - assert np.abs(result - 1) < 1e-2 - - -@pytest.mark.parametrize("adjust", [True, False]) -@pytest.mark.parametrize("ignore_na", [True, False]) -def test_ewma_cases(adjust, ignore_na): - # try adjust/ignore_na args matrix - - s = Series([1.0, 2.0, 4.0, 8.0]) - - if adjust: - expected = Series([1.0, 1.6, 2.736842, 4.923077]) - else: - expected = Series([1.0, 1.333333, 2.222222, 4.148148]) - - result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean() - tm.assert_series_equal(result, expected) - - -def test_ewma_nan_handling(): - s = Series([1.0] + [np.nan] * 5 + [1.0]) - result = s.ewm(com=5).mean() - tm.assert_series_equal(result, Series([1.0] * len(s))) - - s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0]) - result = s.ewm(com=5).mean() - tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4)) - - -@pytest.mark.parametrize( - "s, adjust, ignore_na, w", - [ - ( - Series([np.nan, 1.0, 101.0]), - True, - False, - [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0], - ), - ( - Series([np.nan, 1.0, 101.0]), - True, - True, - [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0], - ), - ( - Series([np.nan, 1.0, 101.0]), - False, - False, - [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))], - ), - 
( - Series([np.nan, 1.0, 101.0]), - False, - True, - [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))], - ), - ( - Series([1.0, np.nan, 101.0]), - True, - False, - [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0], - ), - ( - Series([1.0, np.nan, 101.0]), - True, - True, - [(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0], - ), - ( - Series([1.0, np.nan, 101.0]), - False, - False, - [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))], - ), - ( - Series([1.0, np.nan, 101.0]), - False, - True, - [(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))], - ), - ( - Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), - True, - False, - [np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan], - ), - ( - Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), - True, - True, - [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan], - ), - ( - Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), - False, - False, - [ - np.nan, - (1.0 - (1.0 / (1.0 + 2.0))) ** 3, - np.nan, - np.nan, - (1.0 / (1.0 + 2.0)), - np.nan, - ], - ), - ( - Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), - False, - True, - [ - np.nan, - (1.0 - (1.0 / (1.0 + 2.0))), - np.nan, - np.nan, - (1.0 / (1.0 + 2.0)), - np.nan, - ], - ), - ( - Series([1.0, np.nan, 101.0, 50.0]), - True, - False, - [ - (1.0 - (1.0 / (1.0 + 2.0))) ** 3, - np.nan, - (1.0 - (1.0 / (1.0 + 2.0))), - 1.0, - ], - ), - ( - Series([1.0, np.nan, 101.0, 50.0]), - True, - True, - [ - (1.0 - (1.0 / (1.0 + 2.0))) ** 2, - np.nan, - (1.0 - (1.0 / (1.0 + 2.0))), - 1.0, - ], - ), - ( - Series([1.0, np.nan, 101.0, 50.0]), - False, - False, - [ - (1.0 - (1.0 / (1.0 + 2.0))) ** 3, - np.nan, - (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)), - (1.0 / (1.0 + 2.0)) - * ((1.0 - (1.0 / (1.0 + 2.0))) ** 2 + (1.0 / (1.0 + 2.0))), - ], - ), - ( - Series([1.0, np.nan, 101.0, 50.0]), - False, - True, - [ - (1.0 - (1.0 / (1.0 + 2.0))) ** 2, - np.nan, - (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)), - (1.0 
/ (1.0 + 2.0)), - ], - ), - ], -) -def test_ewma_nan_handling_cases(s, adjust, ignore_na, w): - # GH 7603 - expected = (s.multiply(w).cumsum() / Series(w).cumsum()).fillna(method="ffill") - result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean() - - tm.assert_series_equal(result, expected) - if ignore_na is False: - # check that ignore_na defaults to False - result = s.ewm(com=2.0, adjust=adjust).mean() - tm.assert_series_equal(result, expected) - - def test_ewma_span_com_args(series): A = series.ewm(com=9.5).mean() B = series.ewm(span=20).mean() @@ -230,22 +48,6 @@ def test_ewma_halflife_arg(series): series.ewm() -def test_ewm_alpha(): - # GH 10789 - arr = np.random.randn(100) - locs = np.arange(20, 40) - arr[locs] = np.NaN - - s = Series(arr) - a = s.ewm(alpha=0.61722699889169674).mean() - b = s.ewm(com=0.62014947789973052).mean() - c = s.ewm(span=2.240298955799461).mean() - d = s.ewm(halflife=0.721792864318).mean() - tm.assert_series_equal(a, b) - tm.assert_series_equal(a, c) - tm.assert_series_equal(a, d) - - def test_ewm_alpha_arg(series): # GH 10789 s = series @@ -260,96 +62,3 @@ def test_ewm_alpha_arg(series): s.ewm(span=10.0, alpha=0.5) with pytest.raises(ValueError, match=msg): s.ewm(halflife=10.0, alpha=0.5) - - -def test_ewm_domain_checks(): - # GH 12492 - arr = np.random.randn(100) - locs = np.arange(20, 40) - arr[locs] = np.NaN - - s = Series(arr) - msg = "comass must satisfy: comass >= 0" - with pytest.raises(ValueError, match=msg): - s.ewm(com=-0.1) - s.ewm(com=0.0) - s.ewm(com=0.1) - - msg = "span must satisfy: span >= 1" - with pytest.raises(ValueError, match=msg): - s.ewm(span=-0.1) - with pytest.raises(ValueError, match=msg): - s.ewm(span=0.0) - with pytest.raises(ValueError, match=msg): - s.ewm(span=0.9) - s.ewm(span=1.0) - s.ewm(span=1.1) - - msg = "halflife must satisfy: halflife > 0" - with pytest.raises(ValueError, match=msg): - s.ewm(halflife=-0.1) - with pytest.raises(ValueError, match=msg): - s.ewm(halflife=0.0) - 
s.ewm(halflife=0.1) - - msg = "alpha must satisfy: 0 < alpha <= 1" - with pytest.raises(ValueError, match=msg): - s.ewm(alpha=-0.1) - with pytest.raises(ValueError, match=msg): - s.ewm(alpha=0.0) - s.ewm(alpha=0.1) - s.ewm(alpha=1.0) - with pytest.raises(ValueError, match=msg): - s.ewm(alpha=1.1) - - -@pytest.mark.parametrize("method", ["mean", "std", "var"]) -def test_ew_empty_series(method): - vals = Series([], dtype=np.float64) - - ewm = vals.ewm(3) - result = getattr(ewm, method)() - tm.assert_almost_equal(result, vals) - - -@pytest.mark.parametrize("min_periods", [0, 1]) -@pytest.mark.parametrize("name", ["mean", "var", "std"]) -def test_ew_min_periods(min_periods, name): - # excluding NaNs correctly - arr = np.random.randn(50) - arr[:10] = np.NaN - arr[-10:] = np.NaN - s = Series(arr) - - # check min_periods - # GH 7898 - result = getattr(s.ewm(com=50, min_periods=2), name)() - assert result[:11].isna().all() - assert not result[11:].isna().any() - - result = getattr(s.ewm(com=50, min_periods=min_periods), name)() - if name == "mean": - assert result[:10].isna().all() - assert not result[10:].isna().any() - else: - # ewm.std, ewm.var (with bias=False) require at least - # two values - assert result[:11].isna().all() - assert not result[11:].isna().any() - - # check series of length 0 - result = getattr(Series(dtype=object).ewm(com=50, min_periods=min_periods), name)() - tm.assert_series_equal(result, Series(dtype="float64")) - - # check series of length 1 - result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)() - if name == "mean": - tm.assert_series_equal(result, Series([1.0])) - else: - # ewm.std, ewm.var with bias=False require at least - # two values - tm.assert_series_equal(result, Series([np.NaN])) - - # pass in ints - result2 = getattr(Series(np.arange(50)).ewm(span=10), name)() - assert result2.dtype == np.float_ diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py index 4cb5d0342572b..4ca090fba4955 100644 
--- a/pandas/tests/window/test_ewm.py +++ b/pandas/tests/window/test_ewm.py @@ -260,3 +260,343 @@ def test_ewm_sum(expected_data, ignore): result = data.ewm(alpha=0.5, ignore_na=ignore).sum() expected = Series(expected_data) tm.assert_series_equal(result, expected) + + +def test_ewma_adjust(): + vals = Series(np.zeros(1000)) + vals[5] = 1 + result = vals.ewm(span=100, adjust=False).mean().sum() + assert np.abs(result - 1) < 1e-2 + + +def test_ewma_cases(adjust, ignore_na): + # try adjust/ignore_na args matrix + + s = Series([1.0, 2.0, 4.0, 8.0]) + + if adjust: + expected = Series([1.0, 1.6, 2.736842, 4.923077]) + else: + expected = Series([1.0, 1.333333, 2.222222, 4.148148]) + + result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean() + tm.assert_series_equal(result, expected) + + +def test_ewma_nan_handling(): + s = Series([1.0] + [np.nan] * 5 + [1.0]) + result = s.ewm(com=5).mean() + tm.assert_series_equal(result, Series([1.0] * len(s))) + + s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0]) + result = s.ewm(com=5).mean() + tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4)) + + +@pytest.mark.parametrize( + "s, adjust, ignore_na, w", + [ + ( + Series([np.nan, 1.0, 101.0]), + True, + False, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0], + ), + ( + Series([np.nan, 1.0, 101.0]), + True, + True, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0], + ), + ( + Series([np.nan, 1.0, 101.0]), + False, + False, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))], + ), + ( + Series([np.nan, 1.0, 101.0]), + False, + True, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))], + ), + ( + Series([1.0, np.nan, 101.0]), + True, + False, + [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0], + ), + ( + Series([1.0, np.nan, 101.0]), + True, + True, + [(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0], + ), + ( + Series([1.0, np.nan, 101.0]), + False, + False, + [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))], + ), + ( + Series([1.0, np.nan, 
101.0]), + False, + True, + [(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))], + ), + ( + Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), + True, + False, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan], + ), + ( + Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), + True, + True, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan], + ), + ( + Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), + False, + False, + [ + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))) ** 3, + np.nan, + np.nan, + (1.0 / (1.0 + 2.0)), + np.nan, + ], + ), + ( + Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), + False, + True, + [ + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))), + np.nan, + np.nan, + (1.0 / (1.0 + 2.0)), + np.nan, + ], + ), + ( + Series([1.0, np.nan, 101.0, 50.0]), + True, + False, + [ + (1.0 - (1.0 / (1.0 + 2.0))) ** 3, + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))), + 1.0, + ], + ), + ( + Series([1.0, np.nan, 101.0, 50.0]), + True, + True, + [ + (1.0 - (1.0 / (1.0 + 2.0))) ** 2, + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))), + 1.0, + ], + ), + ( + Series([1.0, np.nan, 101.0, 50.0]), + False, + False, + [ + (1.0 - (1.0 / (1.0 + 2.0))) ** 3, + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)), + (1.0 / (1.0 + 2.0)) + * ((1.0 - (1.0 / (1.0 + 2.0))) ** 2 + (1.0 / (1.0 + 2.0))), + ], + ), + ( + Series([1.0, np.nan, 101.0, 50.0]), + False, + True, + [ + (1.0 - (1.0 / (1.0 + 2.0))) ** 2, + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)), + (1.0 / (1.0 + 2.0)), + ], + ), + ], +) +def test_ewma_nan_handling_cases(s, adjust, ignore_na, w): + # GH 7603 + expected = (s.multiply(w).cumsum() / Series(w).cumsum()).fillna(method="ffill") + result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean() + + tm.assert_series_equal(result, expected) + if ignore_na is False: + # check that ignore_na defaults to False + result = s.ewm(com=2.0, adjust=adjust).mean() + tm.assert_series_equal(result, expected) + + +def 
test_ewm_alpha(): + # GH 10789 + arr = np.random.randn(100) + locs = np.arange(20, 40) + arr[locs] = np.NaN + + s = Series(arr) + a = s.ewm(alpha=0.61722699889169674).mean() + b = s.ewm(com=0.62014947789973052).mean() + c = s.ewm(span=2.240298955799461).mean() + d = s.ewm(halflife=0.721792864318).mean() + tm.assert_series_equal(a, b) + tm.assert_series_equal(a, c) + tm.assert_series_equal(a, d) + + +def test_ewm_domain_checks(): + # GH 12492 + arr = np.random.randn(100) + locs = np.arange(20, 40) + arr[locs] = np.NaN + + s = Series(arr) + msg = "comass must satisfy: comass >= 0" + with pytest.raises(ValueError, match=msg): + s.ewm(com=-0.1) + s.ewm(com=0.0) + s.ewm(com=0.1) + + msg = "span must satisfy: span >= 1" + with pytest.raises(ValueError, match=msg): + s.ewm(span=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(span=0.0) + with pytest.raises(ValueError, match=msg): + s.ewm(span=0.9) + s.ewm(span=1.0) + s.ewm(span=1.1) + + msg = "halflife must satisfy: halflife > 0" + with pytest.raises(ValueError, match=msg): + s.ewm(halflife=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(halflife=0.0) + s.ewm(halflife=0.1) + + msg = "alpha must satisfy: 0 < alpha <= 1" + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=0.0) + s.ewm(alpha=0.1) + s.ewm(alpha=1.0) + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=1.1) + + +@pytest.mark.parametrize("method", ["mean", "std", "var"]) +def test_ew_empty_series(method): + vals = Series([], dtype=np.float64) + + ewm = vals.ewm(3) + result = getattr(ewm, method)() + tm.assert_almost_equal(result, vals) + + +@pytest.mark.parametrize("min_periods", [0, 1]) +@pytest.mark.parametrize("name", ["mean", "var", "std"]) +def test_ew_min_periods(min_periods, name): + # excluding NaNs correctly + arr = np.random.randn(50) + arr[:10] = np.NaN + arr[-10:] = np.NaN + s = Series(arr) + + # check min_periods + # GH 7898 + result = 
getattr(s.ewm(com=50, min_periods=2), name)() + assert result[:11].isna().all() + assert not result[11:].isna().any() + + result = getattr(s.ewm(com=50, min_periods=min_periods), name)() + if name == "mean": + assert result[:10].isna().all() + assert not result[10:].isna().any() + else: + # ewm.std, ewm.var (with bias=False) require at least + # two values + assert result[:11].isna().all() + assert not result[11:].isna().any() + + # check series of length 0 + result = getattr(Series(dtype=object).ewm(com=50, min_periods=min_periods), name)() + tm.assert_series_equal(result, Series(dtype="float64")) + + # check series of length 1 + result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)() + if name == "mean": + tm.assert_series_equal(result, Series([1.0])) + else: + # ewm.std, ewm.var with bias=False require at least + # two values + tm.assert_series_equal(result, Series([np.NaN])) + + # pass in ints + result2 = getattr(Series(np.arange(50)).ewm(span=10), name)() + assert result2.dtype == np.float_ + + +@pytest.mark.parametrize("name", ["cov", "corr"]) +def test_ewm_corr_cov(name): + A = Series(np.random.randn(50), index=np.arange(50)) + B = A[2:] + np.random.randn(48) + + A[:10] = np.NaN + B[-10:] = np.NaN + + result = getattr(A.ewm(com=20, min_periods=5), name)(B) + assert np.isnan(result.values[:14]).all() + assert not np.isnan(result.values[14:]).any() + + +@pytest.mark.parametrize("min_periods", [0, 1, 2]) +@pytest.mark.parametrize("name", ["cov", "corr"]) +def test_ewm_corr_cov_min_periods(name, min_periods): + # GH 7898 + A = Series(np.random.randn(50), index=np.arange(50)) + B = A[2:] + np.random.randn(48) + + A[:10] = np.NaN + B[-10:] = np.NaN + + result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B) + # binary functions (ewmcov, ewmcorr) with bias=False require at + # least two values + assert np.isnan(result.values[:11]).all() + assert not np.isnan(result.values[11:]).any() + + # check series of length 0 + empty = Series([], 
dtype=np.float64) + result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty) + tm.assert_series_equal(result, empty) + + # check series of length 1 + result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)( + Series([1.0]) + ) + tm.assert_series_equal(result, Series([np.NaN])) + + +@pytest.mark.parametrize("name", ["cov", "corr"]) +def test_different_input_array_raise_exception(name): + A = Series(np.random.randn(50), index=np.arange(50)) + A[:10] = np.NaN + + msg = "other must be a DataFrame or Series" + # exception raised is Exception + with pytest.raises(ValueError, match=msg): + getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50))
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them Since some tests are not running on `windows/moments` (https://github.com/pandas-dev/pandas/issues/37535), moving some lighter ewm tests to `test_ewm.py`
https://api.github.com/repos/pandas-dev/pandas/pulls/44128
2021-10-21T05:22:51Z
2021-10-24T01:51:55Z
2021-10-24T01:51:55Z
2021-10-24T01:51:58Z
Backport PR #44124 on branch 1.3.x (CI: Use Windows and MacOS Numpy Wheels)
diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml index b32b18b86e9df..4fe58ad4d60e9 100644 --- a/.github/workflows/python-dev.yml +++ b/.github/workflows/python-dev.yml @@ -30,9 +30,6 @@ jobs: name: actions-310-dev timeout-minutes: 60 - env: - NUMPY_WHEELS_AVAILABLE: ${{ matrix.os == 'ubuntu-latest' }} - concurrency: group: ${{ github.ref }}-${{ matrix.os }}-dev cancel-in-progress: ${{github.event_name == 'pull_request'}} @@ -51,11 +48,7 @@ jobs: shell: bash run: | python -m pip install --upgrade pip setuptools wheel - if [[ "$NUMPY_WHEELS_AVAILABLE" == "true" ]]; then - pip install -i https://pypi.anaconda.org/scipy-wheels-nightly/simple numpy - else - pip install git+https://github.com/numpy/numpy.git - fi + pip install -i https://pypi.anaconda.org/scipy-wheels-nightly/simple numpy pip install git+https://github.com/nedbat/coveragepy.git pip install cython python-dateutil pytz hypothesis pytest>=6.2.5 pytest-xdist pytest-cov pip list
Backport PR #44124: CI: Use Windows and MacOS Numpy Wheels
https://api.github.com/repos/pandas-dev/pandas/pulls/44127
2021-10-21T01:01:12Z
2021-10-21T02:29:12Z
2021-10-21T02:29:12Z
2021-10-21T02:29:12Z
REF: use concat_compat in union_with_duplicates
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 2bcfe767203bd..c1b587ce3a6b2 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -61,6 +61,7 @@ is_timedelta64_dtype, needs_i8_conversion, ) +from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import PandasDtype from pandas.core.dtypes.generic import ( ABCDatetimeArray, @@ -1834,17 +1835,18 @@ def union_with_duplicates(lvals: ArrayLike, rvals: ArrayLike) -> ArrayLike: ------- np.ndarray or ExtensionArray Containing the unsorted union of both arrays. + + Notes + ----- + Caller is responsible for ensuring lvals.dtype == rvals.dtype. """ indexer = [] l_count = value_counts(lvals, dropna=False) r_count = value_counts(rvals, dropna=False) l_count, r_count = l_count.align(r_count, fill_value=0) - unique_array = unique(np.append(lvals, rvals)) - if not isinstance(lvals, np.ndarray): - # i.e. ExtensionArray - # Note: we only get here with lvals.dtype == rvals.dtype - # TODO: are there any cases where union won't be type/dtype preserving? - unique_array = type(lvals)._from_sequence(unique_array, dtype=lvals.dtype) + unique_array = unique(concat_compat([lvals, rvals])) + unique_array = ensure_wrapped_if_datetimelike(unique_array) + for i, value in enumerate(unique_array): indexer += [i] * int(max(l_count[value], r_count[value])) return unique_array.take(indexer) diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index c7fce9fff3631..28f415476d3fd 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -1,7 +1,10 @@ """ Utility functions related to concat. 
""" -from typing import cast +from typing import ( + TYPE_CHECKING, + cast, +) import numpy as np @@ -10,7 +13,10 @@ DtypeObj, ) -from pandas.core.dtypes.cast import find_common_type +from pandas.core.dtypes.cast import ( + astype_array, + find_common_type, +) from pandas.core.dtypes.common import ( is_categorical_dtype, is_dtype_equal, @@ -19,15 +25,12 @@ from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCCategoricalIndex, + ABCExtensionArray, ABCSeries, ) -from pandas.core.arrays import ExtensionArray -from pandas.core.arrays.sparse import SparseArray -from pandas.core.construction import ( - array as pd_array, - ensure_wrapped_if_datetimelike, -) +if TYPE_CHECKING: + from pandas.core.arrays.sparse import SparseArray def cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike: @@ -59,26 +62,11 @@ def cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike: # SupportsDType[dtype[Any]], str, Union[Tuple[Any, int], Tuple[Any, # Union[SupportsIndex, Sequence[SupportsIndex]]], List[Any], _DTypeDict, # Tuple[Any, Any]]]" [arg-type] - arr = cast(SparseArray, arr) + arr = cast("SparseArray", arr) return arr.to_dense().astype(dtype, copy=False) # type: ignore[arg-type] - if ( - isinstance(arr, np.ndarray) - and arr.dtype.kind in ["m", "M"] - and dtype is np.dtype("object") - ): - # wrap datetime-likes in EA to ensure astype(object) gives Timestamp/Timedelta - # this can happen when concat_compat is called directly on arrays (when arrays - # are not coming from Index/Series._values), eg in BlockManager.quantile - arr = ensure_wrapped_if_datetimelike(arr) - - if isinstance(dtype, ExtensionDtype): - if isinstance(arr, np.ndarray): - # numpy's astype cannot handle ExtensionDtypes - return pd_array(arr, dtype=dtype, copy=False) - return arr.astype(dtype, copy=False) - - return arr.astype(dtype, copy=False) + # astype_array includes ensure_wrapped_if_datetimelike + return astype_array(arr, dtype=dtype, 
copy=False) def concat_compat(to_concat, axis: int = 0, ea_compat_axis: bool = False): @@ -135,7 +123,8 @@ def is_nonempty(x) -> bool: target_dtype = find_common_type([x.dtype for x in to_concat]) to_concat = [cast_to_common_type(arr, target_dtype) for arr in to_concat] - if isinstance(to_concat[0], ExtensionArray): + if isinstance(to_concat[0], ABCExtensionArray): + # TODO: what about EA-backed Index? cls = type(to_concat[0]) return cls._concat_same_type(to_concat) else: @@ -346,6 +335,8 @@ def _concat_datetime(to_concat, axis=0): ------- a single array, preserving the combined dtypes """ + from pandas.core.construction import ensure_wrapped_if_datetimelike + to_concat = [ensure_wrapped_if_datetimelike(x) for x in to_concat] single_dtype = len({x.dtype for x in to_concat}) == 1
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44125
2021-10-20T23:37:53Z
2021-10-21T01:49:59Z
2021-10-21T01:49:59Z
2021-10-21T01:52:43Z
CI: Use Windows and MacOS Numpy Wheels
diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml index b32b18b86e9df..4fe58ad4d60e9 100644 --- a/.github/workflows/python-dev.yml +++ b/.github/workflows/python-dev.yml @@ -30,9 +30,6 @@ jobs: name: actions-310-dev timeout-minutes: 60 - env: - NUMPY_WHEELS_AVAILABLE: ${{ matrix.os == 'ubuntu-latest' }} - concurrency: group: ${{ github.ref }}-${{ matrix.os }}-dev cancel-in-progress: ${{github.event_name == 'pull_request'}} @@ -51,11 +48,7 @@ jobs: shell: bash run: | python -m pip install --upgrade pip setuptools wheel - if [[ "$NUMPY_WHEELS_AVAILABLE" == "true" ]]; then - pip install -i https://pypi.anaconda.org/scipy-wheels-nightly/simple numpy - else - pip install git+https://github.com/numpy/numpy.git - fi + pip install -i https://pypi.anaconda.org/scipy-wheels-nightly/simple numpy pip install git+https://github.com/nedbat/coveragepy.git pip install cython python-dateutil pytz hypothesis pytest>=6.2.5 pytest-xdist pytest-cov pip list
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44124
2021-10-20T23:13:49Z
2021-10-21T01:00:43Z
2021-10-21T01:00:43Z
2021-10-21T01:14:35Z
BUG: DataFrame index name no longer resets after appending a list of series
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 2a718fdcf16e7..f9acfba8100c9 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -614,6 +614,7 @@ Reshaping - Bug in :func:`concat` which ignored the ``sort`` parameter (:issue:`43375`) - Fixed bug in :func:`merge` with :class:`MultiIndex` as column index for the ``on`` argument returning an error when assigning a column internally (:issue:`43734`) - Bug in :func:`crosstab` would fail when inputs are lists or tuples (:issue:`44076`) +- Bug in :meth:`DataFrame.append` failing to retain ``index.name`` when appending a list of :class:`Series` objects (:issue:`44109`) - Fixed metadata propagation in :meth:`Dataframe.apply` method, consequently fixing the same issue for :meth:`Dataframe.transform`, :meth:`Dataframe.nunique` and :meth:`Dataframe.mode` (:issue:`28283`) Sparse diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5c24c57925393..f75a6c1061907 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9086,6 +9086,8 @@ def append( pass elif not isinstance(other[0], DataFrame): other = DataFrame(other) + if self.index.name is not None and not ignore_index: + other.index.name = self.index.name from pandas.core.reshape.concat import concat diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py index fae901b7ba303..c29b247cc6e17 100644 --- a/pandas/tests/frame/methods/test_append.py +++ b/pandas/tests/frame/methods/test_append.py @@ -96,6 +96,26 @@ def test_append_list_of_series_dicts(self): expected = df.append(DataFrame(dicts), ignore_index=True, sort=True) tm.assert_frame_equal(result, expected) + def test_append_list_retain_index_name(self): + df = DataFrame( + [[1, 2], [3, 4]], index=pd.Index(["a", "b"], name="keepthisname") + ) + + serc = Series([5, 6], name="c") + + expected = DataFrame( + [[1, 2], [3, 4], [5, 6]], + index=pd.Index(["a", "b", "c"], name="keepthisname"), + ) + + # 
append series + result = df.append(serc) + tm.assert_frame_equal(result, expected) + + # append list of series + result = df.append([serc]) + tm.assert_frame_equal(result, expected) + def test_append_missing_cols(self): # GH22252 # exercise the conditional branch in append method where the data
- [x] closes #44109 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44123
2021-10-20T22:58:22Z
2021-11-05T00:52:46Z
2021-11-05T00:52:46Z
2022-01-25T21:24:22Z
REF: reuse self.grouper.result_index
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 1331fc07386fb..7ff0842678d7f 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -30,8 +30,4 @@ cpdef inline extract_result(object res): if res.ndim == 1 and len(res) == 1: # see test_agg_lambda_with_timezone, test_resampler_grouper.py::test_apply res = res[0] - if is_array(res): - if res.ndim == 1 and len(res) == 1: - # see test_resampler_grouper.py::test_apply - res = res[0] return res diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 3b54918ae99c1..5f2ee996c26b4 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -287,7 +287,8 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) # see test_groupby.test_basic result = self._aggregate_named(func, *args, **kwargs) - index = Index(sorted(result), name=self.grouper.names[0]) + # result is a dict whose keys are the elements of result_index + index = self.grouper.result_index return create_series_with_explicit_dtype( result, index=index, dtype_if_empty=object )
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44122
2021-10-20T22:48:53Z
2021-10-21T00:49:40Z
2021-10-21T00:49:40Z
2021-10-21T01:49:06Z
PERF: new_block_2d
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 44271907dd75a..de612b367f78f 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1289,7 +1289,8 @@ def _unstack( new_values = new_values.T[mask] new_placement = new_placement[mask] - blocks = [new_block(new_values, placement=new_placement, ndim=2)] + bp = BlockPlacement(new_placement) + blocks = [new_block_2d(new_values, placement=bp)] return blocks, mask @final @@ -1318,7 +1319,7 @@ def quantile( assert is_list_like(qs) # caller is responsible for this result = quantile_compat(self.values, np.asarray(qs._values), interpolation) - return new_block(result, placement=self._mgr_locs, ndim=2) + return new_block_2d(result, placement=self._mgr_locs) class EABackedBlock(Block): @@ -1941,6 +1942,17 @@ def get_block_type(dtype: DtypeObj): return cls +def new_block_2d(values: ArrayLike, placement: BlockPlacement): + # new_block specialized to case with + # ndim=2 + # isinstance(placement, BlockPlacement) + # check_ndim/ensure_block_shape already checked + klass = get_block_type(values.dtype) + + values = maybe_coerce_values(values) + return klass(values, ndim=2, placement=placement) + + def new_block(values, placement, *, ndim: int) -> Block: # caller is responsible for ensuring values is NOT a PandasArray diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index e1a51e5783cb8..7687e60db8552 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -48,7 +48,7 @@ ) from pandas.core.internals.blocks import ( ensure_block_shape, - new_block, + new_block_2d, ) from pandas.core.internals.managers import BlockManager @@ -224,11 +224,11 @@ def concatenate_managers( # _is_uniform_join_units ensures a single dtype, so # we can use np.concatenate, which is more performant # than concat_compat - values = np.concatenate(vals, axis=blk.ndim - 1) + values = np.concatenate(vals, axis=1) else: # TODO(EA2D): 
special-casing not needed with 2D EAs values = concat_compat(vals, axis=1) - values = ensure_block_shape(values, blk.ndim) + values = ensure_block_shape(values, ndim=2) values = ensure_wrapped_if_datetimelike(values) @@ -240,7 +240,7 @@ def concatenate_managers( if fastpath: b = blk.make_block_same_class(values, placement=placement) else: - b = new_block(values, placement=placement, ndim=len(axes)) + b = new_block_2d(values, placement=placement) blocks.append(b) diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index c1fd29615e1bc..74388e0b2b91e 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -79,8 +79,9 @@ SingleArrayManager, ) from pandas.core.internals.blocks import ( + BlockPlacement, ensure_block_shape, - new_block, + new_block_2d, ) from pandas.core.internals.managers import ( BlockManager, @@ -370,14 +371,16 @@ def ndarray_to_mgr( if any(x is not y for x, y in zip(obj_columns, maybe_datetime)): dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime] block_values = [ - new_block(dvals_list[n], placement=n, ndim=2) + new_block_2d(dvals_list[n], placement=BlockPlacement(n)) for n in range(len(dvals_list)) ] else: - nb = new_block(values, placement=slice(len(columns)), ndim=2) + bp = BlockPlacement(slice(len(columns))) + nb = new_block_2d(values, placement=bp) block_values = [nb] else: - nb = new_block(values, placement=slice(len(columns)), ndim=2) + bp = BlockPlacement(slice(len(columns))) + nb = new_block_2d(values, placement=bp) block_values = [nb] if len(columns) == 0: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index f34926f727c3f..d6c1f372807ba 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -72,6 +72,7 @@ extend_blocks, get_block_type, new_block, + new_block_2d, ) from pandas.core.internals.ops import ( blockwise_all, @@ -824,6 +825,7 @@ def _slice_take_blocks_ax0( def 
_make_na_block( self, placement: BlockPlacement, fill_value=None, use_na_proxy: bool = False ) -> Block: + # Note: we only get here with self.ndim == 2 if use_na_proxy: assert fill_value is None @@ -844,7 +846,7 @@ def _make_na_block( # Tuple[Any, Any]]" block_values = np.empty(block_shape, dtype=dtype) # type: ignore[arg-type] block_values.fill(fill_value) - return new_block(block_values, placement=placement, ndim=block_values.ndim) + return new_block_2d(block_values, placement=placement) def take(self: T, indexer, axis: int = 1, verify: bool = True) -> T: """ @@ -1106,8 +1108,8 @@ def value_getitem(placement): ) if unfit_val_locs: - unfit_mgr_locs = np.concatenate(unfit_mgr_locs) - unfit_count = len(unfit_mgr_locs) + unfit_idxr = np.concatenate(unfit_mgr_locs) + unfit_count = len(unfit_idxr) new_blocks: list[Block] = [] if value_is_extension_type: @@ -1115,31 +1117,29 @@ def value_getitem(placement): # one item. # TODO(EA2D): special casing unnecessary with 2D EAs new_blocks.extend( - new_block( + new_block_2d( values=value, - ndim=self.ndim, - placement=slice(mgr_loc, mgr_loc + 1), + placement=BlockPlacement(slice(mgr_loc, mgr_loc + 1)), ) - for mgr_loc in unfit_mgr_locs + for mgr_loc in unfit_idxr ) - self._blknos[unfit_mgr_locs] = np.arange(unfit_count) + len(self.blocks) - self._blklocs[unfit_mgr_locs] = 0 + self._blknos[unfit_idxr] = np.arange(unfit_count) + len(self.blocks) + self._blklocs[unfit_idxr] = 0 else: # unfit_val_locs contains BlockPlacement objects unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) new_blocks.append( - new_block( + new_block_2d( values=value_getitem(unfit_val_items), - ndim=self.ndim, - placement=unfit_mgr_locs, + placement=BlockPlacement(unfit_idxr), ) ) - self._blknos[unfit_mgr_locs] = len(self.blocks) - self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) + self._blknos[unfit_idxr] = len(self.blocks) + self._blklocs[unfit_idxr] = np.arange(unfit_count) self.blocks += tuple(new_blocks) @@ -1161,10 +1161,15 @@ def 
insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: if value.ndim == 2: value = value.T + if len(value) > 1: + raise ValueError( + f"Expected a 1D array, got an array with shape {value.T.shape}" + ) else: value = ensure_block_shape(value, ndim=self.ndim) - block = new_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1)) + bp = BlockPlacement(slice(loc, loc + 1)) + block = new_block_2d(values=value, placement=bp) self._insert_update_mgr_locs(loc) self._insert_update_blklocs_and_blknos(loc) @@ -1968,7 +1973,7 @@ def _form_blocks(arrays: list[ArrayLike], consolidate: bool) -> list[Block]: def _tuples_to_blocks_no_consolidate(tuples) -> list[Block]: # tuples produced within _form_blocks are of the form (placement, array) return [ - new_block(ensure_block_shape(x[1], ndim=2), placement=x[0], ndim=2) + new_block_2d(ensure_block_shape(x[1], ndim=2), placement=BlockPlacement(x[0])) for x in tuples ] @@ -2035,7 +2040,7 @@ def _merge_blocks( new_mgr_locs = new_mgr_locs[argsort] bp = BlockPlacement(new_mgr_locs) - return [new_block(new_values, placement=bp, ndim=2)] + return [new_block_2d(new_values, placement=bp)] # can't consolidate --> no merge return blocks diff --git a/pandas/tests/frame/indexing/test_insert.py b/pandas/tests/frame/indexing/test_insert.py index c2c862be42625..213c6e6903a89 100644 --- a/pandas/tests/frame/indexing/test_insert.py +++ b/pandas/tests/frame/indexing/test_insert.py @@ -93,9 +93,7 @@ def test_insert_item_cache(self, using_array_manager): def test_insert_frame(self): # GH#42403 df = DataFrame({"col1": [1, 2], "col2": [3, 4]}) - msg = ( - "Expected a 1D array, got an array with shape " - r"\(2, 2\)|Wrong number of items passed 2, placement implies 1" - ) + + msg = r"Expected a 1D array, got an array with shape \(2, 2\)" with pytest.raises(ValueError, match=msg): df.insert(1, "newcol", df) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 859f5171a6f04..02ff93bf67a4f 
100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -222,12 +222,7 @@ def test_constructor_cast_failure(self): df["foo"] = np.ones((4, 2)).tolist() # this is not ok - msg = "|".join( - [ - "Wrong number of items passed 2, placement implies 1", - "Expected a 1D array, got an array with shape \\(4, 2\\)", - ] - ) + msg = "Expected a 1D array, got an array with shape \\(4, 2\\)" with pytest.raises(ValueError, match=msg): df["test"] = np.ones((4, 2))
Avoids some redundant checks
https://api.github.com/repos/pandas-dev/pandas/pulls/44121
2021-10-20T22:05:09Z
2021-10-21T01:06:18Z
2021-10-21T01:06:18Z
2021-10-21T01:56:58Z
BUG: ignore EAs when counting blocks to raise fragmentation PerformanceWarning
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 17b9e49bcad6a..c7b3874388d73 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -617,6 +617,7 @@ ExtensionArray ^^^^^^^^^^^^^^ - Bug in :func:`array` failing to preserve :class:`PandasArray` (:issue:`43887`) - NumPy ufuncs ``np.abs``, ``np.positive``, ``np.negative`` now correctly preserve dtype when called on ExtensionArrays that implement ``__abs__, __pos__, __neg__``, respectively. In particular this is fixed for :class:`TimedeltaArray` (:issue:`43899`) +- Avoid raising ``PerformanceWarning`` about fragmented DataFrame when using many columns with an extension dtype (:issue:`44098`) - Styler diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index f34926f727c3f..a932728462a31 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1174,7 +1174,7 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: self._known_consolidated = False - if len(self.blocks) > 100: + if sum(not block.is_extension for block in self.blocks) > 100: warnings.warn( "DataFrame is highly fragmented. This is usually the result " "of calling `frame.insert` many times, which has poor performance. 
" diff --git a/pandas/tests/frame/indexing/test_insert.py b/pandas/tests/frame/indexing/test_insert.py index c2c862be42625..90dace98b1694 100644 --- a/pandas/tests/frame/indexing/test_insert.py +++ b/pandas/tests/frame/indexing/test_insert.py @@ -90,6 +90,13 @@ def test_insert_item_cache(self, using_array_manager): assert df.iloc[0, 0] == df[0][0] + def test_insert_EA_no_warning(self): + # PerformanceWarning about fragmented frame should not be raised when + # using EAs (https://github.com/pandas-dev/pandas/issues/44098) + df = DataFrame(np.random.randint(0, 100, size=(3, 100)), dtype="Int64") + with tm.assert_produces_warning(None): + df["a"] = np.array([1, 2, 3]) + def test_insert_frame(self): # GH#42403 df = DataFrame({"col1": [1, 2], "col2": [3, 4]})
Closes #44098
https://api.github.com/repos/pandas-dev/pandas/pulls/44115
2021-10-20T16:36:05Z
2021-10-21T00:51:10Z
2021-10-21T00:51:10Z
2021-10-21T06:04:48Z
CLN: clean TODOs in csv test
diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py index be84eb9c85663..8a857c033a2de 100644 --- a/pandas/tests/frame/methods/test_to_csv.py +++ b/pandas/tests/frame/methods/test_to_csv.py @@ -142,8 +142,6 @@ def test_to_csv_from_csv4(self): result = read_csv(path, index_col="dt_index") result.index = pd.to_timedelta(result.index) - # TODO: remove renaming when GH 10875 is solved - result.index = result.index.rename("dt_index") result["dt_data"] = pd.to_timedelta(result["dt_data"]) tm.assert_frame_equal(df, result, check_index_type=True) @@ -479,11 +477,8 @@ def test_to_csv_from_csv_w_some_infs(self, float_frame): float_frame.to_csv(path) recons = self.read_csv(path) - # TODO to_csv drops column name - tm.assert_frame_equal(float_frame, recons, check_names=False) - tm.assert_frame_equal( - np.isinf(float_frame), np.isinf(recons), check_names=False - ) + tm.assert_frame_equal(float_frame, recons) + tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons)) def test_to_csv_from_csv_w_all_infs(self, float_frame): @@ -495,11 +490,8 @@ def test_to_csv_from_csv_w_all_infs(self, float_frame): float_frame.to_csv(path) recons = self.read_csv(path) - # TODO to_csv drops column name - tm.assert_frame_equal(float_frame, recons, check_names=False) - tm.assert_frame_equal( - np.isinf(float_frame), np.isinf(recons), check_names=False - ) + tm.assert_frame_equal(float_frame, recons) + tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons)) def test_to_csv_no_index(self): # GH 3624, after appending columns, to_csv fails @@ -868,8 +860,7 @@ def test_to_csv_stringio(self, float_frame): float_frame.to_csv(buf) buf.seek(0) recons = read_csv(buf, index_col=0) - # TODO to_csv drops column name - tm.assert_frame_equal(recons, float_frame, check_names=False) + tm.assert_frame_equal(recons, float_frame) def test_to_csv_float_format(self):
null
https://api.github.com/repos/pandas-dev/pandas/pulls/44113
2021-10-20T12:41:09Z
2021-10-20T17:25:26Z
2021-10-20T17:25:26Z
2022-11-18T02:20:52Z
DOC: ewm constructor
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index c697db9ea313e..c17af442fe2cc 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -126,9 +126,7 @@ def _calculate_deltas( class ExponentialMovingWindow(BaseWindow): r""" - Provide exponential weighted (EW) functions. - - Available EW functions: ``mean()``, ``var()``, ``std()``, ``corr()``, ``cov()``. + Provide exponentially weighted (EW) calculations. Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be provided. @@ -136,28 +134,36 @@ class ExponentialMovingWindow(BaseWindow): Parameters ---------- com : float, optional - Specify decay in terms of center of mass, + Specify decay in terms of center of mass + :math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`. + span : float, optional - Specify decay in terms of span, + Specify decay in terms of span + :math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`. + halflife : float, str, timedelta, optional - Specify decay in terms of half-life, + Specify decay in terms of half-life + :math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for :math:`halflife > 0`. If ``times`` is specified, the time unit (str or timedelta) over which an - observation decays to half its value. Only applicable to ``mean()`` + observation decays to half its value. Only applicable to ``mean()``, and halflife value will not apply to the other functions. .. versionadded:: 1.1.0 alpha : float, optional - Specify smoothing factor :math:`\alpha` directly, + Specify smoothing factor :math:`\alpha` directly + :math:`0 < \alpha \leq 1`. + min_periods : int, default 0 - Minimum number of observations in window required to have a value - (otherwise result is NA). + Minimum number of observations in window required to have a value; + otherwise, result is ``np.nan``. 
+ adjust : bool, default True Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings (viewing EWMA as a moving average). @@ -179,8 +185,7 @@ class ExponentialMovingWindow(BaseWindow): y_t &= (1 - \alpha) y_{t-1} + \alpha x_t, \end{split} ignore_na : bool, default False - Ignore missing values when calculating weights; specify ``True`` to reproduce - pre-0.15.0 behavior. + Ignore missing values when calculating weights. - When ``ignore_na=False`` (default), weights are based on absolute positions. For example, the weights of :math:`x_0` and :math:`x_2` used in calculating @@ -188,29 +193,34 @@ class ExponentialMovingWindow(BaseWindow): :math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and :math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``. - - When ``ignore_na=True`` (reproducing pre-0.15.0 behavior), weights are based + - When ``ignore_na=True``, weights are based on relative positions. For example, the weights of :math:`x_0` and :math:`x_2` used in calculating the final weighted average of [:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if ``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``. + axis : {0, 1}, default 0 - The axis to use. The value 0 identifies the rows, and 1 - identifies the columns. + If ``0`` or ``'index'``, calculate across the rows. + + If ``1`` or ``'columns'``, calculate across the columns. + times : str, np.ndarray, Series, default None .. versionadded:: 1.1.0 + Only applicable to ``mean()``. + Times corresponding to the observations. Must be monotonically increasing and ``datetime64[ns]`` dtype. - If str, the name of the column in the DataFrame representing the times. + If 1-D array like, a sequence with the same shape as the observations. .. deprecated:: 1.4.0 + If str, the name of the column in the DataFrame representing the times. - If 1-D array like, a sequence with the same shape as the observations. 
- - Only applicable to ``mean()``. method : str {'single', 'table'}, default 'single' + .. versionadded:: 1.4.0 + Execute the rolling operation per single column or row (``'single'``) or over the entire object (``'table'``). @@ -219,12 +229,9 @@ class ExponentialMovingWindow(BaseWindow): Only applicable to ``mean()`` - .. versionadded:: 1.4.0 - Returns ------- - DataFrame - A Window sub-classed for the particular operation. + ``ExponentialMovingWindow`` subclass See Also -------- @@ -233,9 +240,8 @@ class ExponentialMovingWindow(BaseWindow): Notes ----- - - More details can be found at: - :ref:`Exponentially weighted windows <window.exponentially_weighted>`. + See :ref:`Windowing Operations <window.exponentially_weighted>` + for further usage details and examples. Examples -------- @@ -255,8 +261,52 @@ class ExponentialMovingWindow(BaseWindow): 2 1.615385 3 1.615385 4 3.670213 + >>> df.ewm(alpha=2 / 3).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + + **adjust** + + >>> df.ewm(com=0.5, adjust=True).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + >>> df.ewm(com=0.5, adjust=False).mean() + B + 0 0.000000 + 1 0.666667 + 2 1.555556 + 3 1.555556 + 4 3.650794 + + **ignore_na** + + >>> df.ewm(com=0.5, ignore_na=True).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.225000 + >>> df.ewm(com=0.5, ignore_na=False).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + + **times** - Specifying ``times`` with a timedelta ``halflife`` when computing mean. + Exponentially weighted mean with weights calculated with a timedelta ``halflife`` + relative to ``times``. >>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17'] >>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them * Made language more consistent in the Parameters * Organized and added more Examples
https://api.github.com/repos/pandas-dev/pandas/pulls/44111
2021-10-20T04:51:02Z
2021-10-21T00:58:05Z
2021-10-21T00:58:05Z
2021-10-21T09:17:19Z
DOC: expanding constructor
diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index 58662e44b9887..796849e622ff2 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -44,16 +44,26 @@ class Expanding(RollingAndExpandingMixin): """ - Provide expanding transformations. + Provide expanding window calculations. Parameters ---------- min_periods : int, default 1 - Minimum number of observations in window required to have a value - (otherwise result is NA). + Minimum number of observations in window required to have a value; + otherwise, result is ``np.nan``. + center : bool, default False - Set the labels at the center of the window. + If False, set the window labels as the right edge of the window index. + + If True, set the window labels as the center of the window index. + + .. deprecated:: 1.1.0 + axis : int or str, default 0 + If ``0`` or ``'index'``, roll across the rows. + + If ``1`` or ``'columns'``, roll across the columns. + method : str {'single', 'table'}, default 'single' Execute the rolling operation per single column or row (``'single'``) or over the entire object (``'table'``). @@ -65,7 +75,7 @@ class Expanding(RollingAndExpandingMixin): Returns ------- - a Window sub-classed for the particular operation + ``Expanding`` subclass See Also -------- @@ -74,8 +84,8 @@ class Expanding(RollingAndExpandingMixin): Notes ----- - By default, the result is set to the right edge of the window. This can be - changed to the center of the window by setting ``center=True``. + See :ref:`Windowing Operations <window.expanding>` for further usage details + and examples. Examples -------- @@ -88,13 +98,24 @@ class Expanding(RollingAndExpandingMixin): 3 NaN 4 4.0 - >>> df.expanding(2).sum() + **min_periods** + + Expanding sum with 1 vs 3 observations needed to calculate a value. 
+ + >>> df.expanding(1).sum() B - 0 NaN + 0 0.0 1 1.0 2 3.0 3 3.0 4 7.0 + >>> df.expanding(3).sum() + B + 0 NaN + 1 NaN + 2 3.0 + 3 3.0 + 4 7.0 """ _attributes: list[str] = ["min_periods", "center", "axis", "method"]
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them * Made language more consistent in the Parameters * Organized and added more Examples * Added missing deprecation tag for `center`
https://api.github.com/repos/pandas-dev/pandas/pulls/44110
2021-10-20T04:12:41Z
2021-10-21T00:54:52Z
2021-10-21T00:54:52Z
2021-10-21T01:48:30Z
DOC: rolling constructor
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 89be1f4206939..39aa5da95cc29 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -840,52 +840,89 @@ class Window(BaseWindow): Parameters ---------- window : int, offset, or BaseIndexer subclass - Size of the moving window. This is the number of observations used for - calculating the statistic. Each window will be a fixed size. + Size of the moving window. - If its an offset then this will be the time period of each window. Each + If an integer, the fixed number of observations used for + each window. + + If an offset, the time period of each window. Each window will be a variable sized based on the observations included in the time-period. This is only valid for datetimelike indexes. + To learn more about the offsets & frequency strings, please see `this link + <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. - If a BaseIndexer subclass is passed, calculates the window boundaries + If a BaseIndexer subclass, the window boundaries based on the defined ``get_window_bounds`` method. Additional rolling - keyword arguments, namely `min_periods`, `center`, and - `closed` will be passed to `get_window_bounds`. + keyword arguments, namely ``min_periods``, ``center``, and + ``closed`` will be passed to ``get_window_bounds``. + min_periods : int, default None - Minimum number of observations in window required to have a value - (otherwise result is NA). For a window that is specified by an offset, - `min_periods` will default to 1. Otherwise, `min_periods` will default + Minimum number of observations in window required to have a value; + otherwise, result is ``np.nan``. + + For a window that is specified by an offset, ``min_periods`` will default to 1. + + For a window that is specified by an integer, ``min_periods`` will default to the size of the window. 
+ center : bool, default False - Set the labels at the center of the window. + If False, set the window labels as the right edge of the window index. + + If True, set the window labels as the center of the window index. + win_type : str, default None - Provide a window type. If ``None``, all points are evenly weighted. - See the notes below for further information. + If ``None``, all points are evenly weighted. + + If a string, it must be a valid `scipy.signal window function + <https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__. + + Certain Scipy window types require additional parameters to be passed + in the aggregation function. The additional parameters must match + the keywords specified in the Scipy window type method signature. + on : str, optional - For a DataFrame, a datetime-like column or Index level on which + For a DataFrame, a column label or Index level on which to calculate the rolling window, rather than the DataFrame's index. + Provided integer column is ignored and excluded from result since an integer index is not used to calculate the rolling window. + axis : int or str, default 0 + If ``0`` or ``'index'``, roll across the rows. + + If ``1`` or ``'columns'``, roll across the columns. + closed : str, default None - Make the interval closed on the 'right', 'left', 'both' or - 'neither' endpoints. Defaults to 'right'. + If ``'right'``, the first point in the window is excluded from calculations. + + If ``'left'``, the last point in the window is excluded from calculations. + + If ``'both'``, the no points in the window are excluded from calculations. + + If ``'neither'``, the first and last points in the window are excluded + from calculations. + + Default ``None`` (``'right'``) .. versionchanged:: 1.2.0 The closed parameter with fixed windows is now supported. + method : str {'single', 'table'}, default 'single' + + .. 
versionadded:: 1.3.0 + Execute the rolling operation per single column or row (``'single'``) or over the entire object (``'table'``). This argument is only implemented when specifying ``engine='numba'`` in the method call. - .. versionadded:: 1.3.0 - Returns ------- - a Window or Rolling sub-classed for the particular operation + ``Window`` subclass if a ``win_type`` is passed + + ``Rolling`` subclass if ``win_type`` is not passed See Also -------- @@ -894,20 +931,8 @@ class Window(BaseWindow): Notes ----- - By default, the result is set to the right edge of the window. This can be - changed to the center of the window by setting ``center=True``. - - To learn more about the offsets & frequency strings, please see `this link - <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. - - If ``win_type=None``, all points are evenly weighted; otherwise, ``win_type`` - can accept a string of any `scipy.signal window function - <https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__. - - Certain Scipy window types require additional parameters to be passed - in the aggregation function. The additional parameters must match - the keywords specified in the Scipy window type method signature. - Please see the third example below on how to add the additional parameters. + See :ref:`Windowing Operations <window.generic>` for further usage details + and examples. Examples -------- @@ -920,40 +945,58 @@ class Window(BaseWindow): 3 NaN 4 4.0 - Rolling sum with a window length of 2, using the 'triang' - window type. + **window** - >>> df.rolling(2, win_type='triang').sum() + Rolling sum with a window length of 2 observations. + + >>> df.rolling(2).sum() B 0 NaN - 1 0.5 - 2 1.5 + 1 1.0 + 2 3.0 3 NaN 4 NaN - Rolling sum with a window length of 2, using the 'gaussian' - window type (note how we need to specify std). + Rolling sum with a window span of 2 seconds. 
- >>> df.rolling(2, win_type='gaussian').sum(std=3) - B - 0 NaN - 1 0.986207 - 2 2.958621 - 3 NaN - 4 NaN + >>> df_time = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}, + ... index = [pd.Timestamp('20130101 09:00:00'), + ... pd.Timestamp('20130101 09:00:02'), + ... pd.Timestamp('20130101 09:00:03'), + ... pd.Timestamp('20130101 09:00:05'), + ... pd.Timestamp('20130101 09:00:06')]) - Rolling sum with a window length of 2, min_periods defaults - to the window length. + >>> df_time + B + 2013-01-01 09:00:00 0.0 + 2013-01-01 09:00:02 1.0 + 2013-01-01 09:00:03 2.0 + 2013-01-01 09:00:05 NaN + 2013-01-01 09:00:06 4.0 - >>> df.rolling(2).sum() + >>> df_time.rolling('2s').sum() + B + 2013-01-01 09:00:00 0.0 + 2013-01-01 09:00:02 1.0 + 2013-01-01 09:00:03 3.0 + 2013-01-01 09:00:05 NaN + 2013-01-01 09:00:06 4.0 + + Rolling sum with forward looking windows with 2 observations. + + >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2) + >>> df.rolling(window=indexer, min_periods=1).sum() B - 0 NaN - 1 1.0 - 2 3.0 - 3 NaN - 4 NaN + 0 1.0 + 1 3.0 + 2 2.0 + 3 4.0 + 4 4.0 + + **min_periods** - Same as above, but explicitly set the min_periods + Rolling sum with a window length of 2 observations, but only needs a minimum of 1 + observation to calculate a value. >>> df.rolling(2, min_periods=1).sum() B @@ -963,45 +1006,38 @@ class Window(BaseWindow): 3 2.0 4 4.0 - Same as above, but with forward-looking windows + **center** - >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2) - >>> df.rolling(window=indexer, min_periods=1).sum() + Rolling sum with the result assigned to the center of the window index. + + >>> df.rolling(3, min_periods=1, center=True).sum() B 0 1.0 1 3.0 - 2 2.0 - 3 4.0 + 2 3.0 + 3 6.0 4 4.0 - A ragged (meaning not-a-regular frequency), time-indexed DataFrame - - >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}, - ... index = [pd.Timestamp('20130101 09:00:00'), - ... pd.Timestamp('20130101 09:00:02'), - ... 
pd.Timestamp('20130101 09:00:03'), - ... pd.Timestamp('20130101 09:00:05'), - ... pd.Timestamp('20130101 09:00:06')]) + >>> df.rolling(3, min_periods=1, center=False).sum() + B + 0 0.0 + 1 1.0 + 2 3.0 + 3 3.0 + 4 6.0 - >>> df - B - 2013-01-01 09:00:00 0.0 - 2013-01-01 09:00:02 1.0 - 2013-01-01 09:00:03 2.0 - 2013-01-01 09:00:05 NaN - 2013-01-01 09:00:06 4.0 + **win_type** - Contrasting to an integer rolling window, this will roll a variable - length window corresponding to the time period. - The default for min_periods is 1. + Rolling sum with a window length of 2, using the Scipy ``'gaussian'`` + window type. ``std`` is required in the aggregation function. - >>> df.rolling('2s').sum() - B - 2013-01-01 09:00:00 0.0 - 2013-01-01 09:00:02 1.0 - 2013-01-01 09:00:03 3.0 - 2013-01-01 09:00:05 NaN - 2013-01-01 09:00:06 4.0 + >>> df.rolling(2, win_type='gaussian').sum(std=3) + B + 0 NaN + 1 0.986207 + 2 2.958621 + 3 NaN + 4 NaN """ _attributes = [
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them * Made language more consistent in the `Parameters` * Organized and added more `Examples`
https://api.github.com/repos/pandas-dev/pandas/pulls/44108
2021-10-20T01:22:12Z
2021-10-21T00:56:13Z
2021-10-21T00:56:13Z
2021-10-21T01:48:39Z
BUG: RangeIndex.difference missed case
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 4def2e4b93553..b209a6556709b 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -705,6 +705,10 @@ def _difference(self, other, sort=None): else: return super()._difference(other, sort=sort) + elif len(overlap) == 2 and overlap[0] == first[0] and overlap[-1] == first[-1]: + # e.g. range(-8, 20, 7) and range(13, -9, -3) + return self[1:-1] + if overlap.step == first.step: if overlap[0] == first.start: # The difference is everything after the intersection @@ -712,6 +716,10 @@ def _difference(self, other, sort=None): elif overlap[-1] == first[-1]: # The difference is everything before the intersection new_rng = range(first.start, overlap[0], first.step) + elif overlap._range == first[1:-1]: + # e.g. range(4) and range(1, 3) + step = len(first) - 1 + new_rng = first[::step] else: # The difference is not range-like # e.g. range(1, 10, 1) and range(3, 7, 1) @@ -725,16 +733,19 @@ def _difference(self, other, sort=None): if overlap.step == first.step * 2: if overlap[0] == first[0] and overlap[-1] in (first[-1], first[-2]): # e.g. range(1, 10, 1) and range(1, 10, 2) - return self[1::2] + new_rng = first[1::2] elif overlap[0] == first[1] and overlap[-1] in (first[-1], first[-2]): # e.g. range(1, 10, 1) and range(2, 10, 2) - return self[::2] + new_rng = first[::2] - # We can get here with e.g. range(20) and range(0, 10, 2) + else: + # We can get here with e.g. range(20) and range(0, 10, 2) + return super()._difference(other, sort=sort) - # e.g. range(10) and range(0, 10, 3) - return super()._difference(other, sort=sort) + else: + # e.g. 
range(10) and range(0, 10, 3) + return super()._difference(other, sort=sort) new_index = type(self)._simple_new(new_rng, name=res_name) if first is not self._range: diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py index 583391bd96a85..2942010af2720 100644 --- a/pandas/tests/indexes/ranges/test_setops.py +++ b/pandas/tests/indexes/ranges/test_setops.py @@ -377,6 +377,24 @@ def test_difference_mismatched_step(self): result = obj[::-1].difference(obj[1::2], sort=False) tm.assert_index_equal(result, expected[::-1], exact=True) + def test_difference_interior_overlap_endpoints_preserved(self): + left = RangeIndex(range(4)) + right = RangeIndex(range(1, 3)) + + result = left.difference(right) + expected = RangeIndex(0, 4, 3) + assert expected.tolist() == [0, 3] + tm.assert_index_equal(result, expected, exact=True) + + def test_difference_endpoints_overlap_interior_preserved(self): + left = RangeIndex(-8, 20, 7) + right = RangeIndex(13, -9, -3) + + result = left.difference(right) + expected = RangeIndex(-1, 13, 7) + assert expected.tolist() == [-1, 6] + tm.assert_index_equal(result, expected, exact=True) + def test_difference_interior_non_preserving(self): # case with intersection of length 1 but RangeIndex is not preserved idx = Index(range(10))
Need to figure out why running hypothesis locally (repeatedly) isn't catching the ones caught on the CI.
https://api.github.com/repos/pandas-dev/pandas/pulls/44107
2021-10-19T22:24:16Z
2021-10-20T21:06:43Z
2021-10-20T21:06:43Z
2021-10-20T21:51:49Z
BUG: PeriodIndex[B].to_timestamp inferring "D" instead of "B".
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 99a66c7e5454b..8732e1c397ce5 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -579,6 +579,7 @@ I/O Period ^^^^^^ - Bug in adding a :class:`Period` object to a ``np.timedelta64`` object incorrectly raising ``TypeError`` (:issue:`44182`) +- Bug in :meth:`PeriodIndex.to_timestamp` when the index has ``freq="B"`` inferring ``freq="D"`` for its result instead of ``freq="B"`` (:issue:`44105`) - Plotting diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 2f36b72229225..01018c7263f32 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -12,6 +12,7 @@ import numpy as np +from pandas._libs import algos as libalgos from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, @@ -506,7 +507,22 @@ def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray: new_parr = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base) - return DatetimeArray(new_data)._with_freq("infer") + dta = DatetimeArray(new_data) + + if self.freq.name == "B": + # See if we can retain BDay instead of Day in cases where + # len(self) is too small for infer_freq to distinguish between them + diffs = libalgos.unique_deltas(self.asi8) + if len(diffs) == 1: + diff = diffs[0] + if diff == self.freq.n: + dta._freq = self.freq + elif diff == 1: + dta._freq = self.freq.base + # TODO: other cases? 
+ return dta + else: + return dta._with_freq("infer") # -------------------------------------------------------------------- diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index c7c1ce6c04692..13fe3c2d427c5 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -1114,6 +1114,25 @@ def test_to_timestamp(self, how, arr1d): # an EA-specific tm.assert_ function tm.assert_index_equal(pd.Index(result), pd.Index(expected)) + def test_to_timestamp_roundtrip_bday(self): + # Case where infer_freq inside would choose "D" instead of "B" + dta = pd.date_range("2021-10-18", periods=3, freq="B")._data + parr = dta.to_period() + result = parr.to_timestamp() + assert result.freq == "B" + tm.assert_extension_array_equal(result, dta) + + dta2 = dta[::2] + parr2 = dta2.to_period() + result2 = parr2.to_timestamp() + assert result2.freq == "2B" + tm.assert_extension_array_equal(result2, dta2) + + parr3 = dta.to_period("2B") + result3 = parr3.to_timestamp() + assert result3.freq == "B" + tm.assert_extension_array_equal(result3, dta) + def test_to_timestamp_out_of_bounds(self): # GH#19643 previously overflowed silently pi = pd.period_range("1500", freq="Y", periods=3)
- [ ] closes #xxxx - [x] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44105
2021-10-19T20:15:59Z
2021-11-11T13:41:52Z
2021-11-11T13:41:52Z
2021-11-11T13:41:52Z
DOC: Perf notes
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 026429dabae84..17b9e49bcad6a 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -403,21 +403,46 @@ Performance improvements - Performance improvement in :meth:`.GroupBy.sample`, especially when ``weights`` argument provided (:issue:`34483`) - Performance improvement when converting non-string arrays to string arrays (:issue:`34483`) - Performance improvement in :meth:`.GroupBy.transform` for user-defined functions (:issue:`41598`) -- Performance improvement in constructing :class:`DataFrame` objects (:issue:`42631`) +- Performance improvement in constructing :class:`DataFrame` objects (:issue:`42631`, :issue:`43142`, :issue:`43147`, :issue:`43307`, :issue:`43144`) - Performance improvement in :meth:`GroupBy.shift` when ``fill_value`` argument is provided (:issue:`26615`) - Performance improvement in :meth:`DataFrame.corr` for ``method=pearson`` on data without missing values (:issue:`40956`) -- Performance improvement in some :meth:`GroupBy.apply` operations (:issue:`42992`) -- Performance improvement in :func:`read_stata` (:issue:`43059`) +- Performance improvement in some :meth:`GroupBy.apply` operations (:issue:`42992`, :issue:`43578`) +- Performance improvement in :func:`read_stata` (:issue:`43059`, :issue:`43227`) +- Performance improvement in :func:`read_sas` (:issue:`43333`) - Performance improvement in :meth:`to_datetime` with ``uint`` dtypes (:issue:`42606`) - Performance improvement in :meth:`to_datetime` with ``infer_datetime_format`` set to ``True`` (:issue:`43901`) - Performance improvement in :meth:`Series.sparse.to_coo` (:issue:`42880`) +- Performance improvement in indexing with a :class:`UInt64Index` (:issue:`43862`) +- Performance improvement in indexing with a :class:`Float64Index` (:issue:`43705`) +- Performance improvement in indexing with a non-unique Index (:issue:`43792`) +- Performance improvement in indexing with a listlike 
indexer on a :class:`MultiIndex` (:issue:`43370`) - Performance improvement in indexing with a :class:`MultiIndex` indexer on another :class:`MultiIndex` (:issue:43370`) -- Performance improvement in :meth:`GroupBy.quantile` (:issue:`43469`) +- Performance improvement in :meth:`GroupBy.quantile` (:issue:`43469`, :issue:`43725`) +- Performance improvement in :meth:`GroupBy.count` (:issue:`43730`, :issue:`43694`) +- Performance improvement in :meth:`GroupBy.any` and :meth:`GroupBy.all` (:issue:`43675`, :issue:`42841`) +- Performance improvement in :meth:`GroupBy.std` (:issue:`43115`, :issue:`43576`) +- Performance improvement in :meth:`GroupBy.cumsum` (:issue:`43309`) - :meth:`SparseArray.min` and :meth:`SparseArray.max` no longer require converting to a dense array (:issue:`43526`) - Indexing into a :class:`SparseArray` with a ``slice`` with ``step=1`` no longer requires converting to a dense array (:issue:`43777`) - Performance improvement in :meth:`SparseArray.take` with ``allow_fill=False`` (:issue:`43654`) - Performance improvement in :meth:`.Rolling.mean` and :meth:`.Expanding.mean` with ``engine="numba"`` (:issue:`43612`) - Improved performance of :meth:`pandas.read_csv` with ``memory_map=True`` when file encoding is UTF-8 (:issue:`43787`) +- Performance improvement in :meth:`RangeIndex.insert` (:issue:`43988`) +- Performance improvement in :meth:`Index.insert` (:issue:`43953`) +- Performance improvement in :meth:`DatetimeIndex.tolist` (:issue:`43823`) +- Performance improvement in :meth:`DatetimeIndex.union` (:issue:`42353`) +- Performance improvement in :meth:`Series.nsmallest` (:issue:`43696`) +- Performance improvement in :meth:`DataFrame.insert` (:issue:`42998`) +- Performance improvement in :meth:`DataFrame.dropna` (:issue:`43683`) +- Performance improvement in :meth:`DataFrame.fillna` (:issue:`43316`) +- Performance improvement in :meth:`DataFrame.values` (:issue:`43160`) +- Performance improvement in :meth:`DataFrame.select_dtypes` (:issue:`42611`) +- 
Performance improvement in :class:`DataFrame` reductions (:issue:`43185`, :issue:`43243`, :issue:`43311`, :issue:`43609`) +- Performance improvement in :meth:`Series.unstack` and :meth:`DataFrame.unstack` (:issue:`43335`, :issue:`43352`, :issue:`42704`, :issue:`43025`) +- Performance improvement in :meth:`Series.to_frame` (:issue:`43558`) +- Performance improvement in :meth:`Series.mad` (:issue:`43010`) +- Performance improvement in :func:`merge` (:issue:`43332`) +- Performance improvement in :func:`concat` (:issue:`43354`) - .. ---------------------------------------------------------------------------
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44101
2021-10-19T17:17:07Z
2021-10-19T21:59:52Z
2021-10-19T21:59:52Z
2021-10-19T22:02:49Z
testing broadcast on multiindex
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 0e6b36a484c47..87af6152b8189 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -668,6 +668,21 @@ def test_arithmetic_with_duplicate_columns(self, op): str(result) result.dtypes + @pytest.mark.parametrize("level", [0, None]) + def test_broadcast_multiindex(self, level): + # GH34388 + df1 = DataFrame({"A": [0, 1, 2], "B": [1, 2, 3]}) + df1.columns = df1.columns.set_names("L1") + + df2 = DataFrame({("A", "C"): [0, 0, 0], ("A", "D"): [0, 0, 0]}) + df2.columns = df2.columns.set_names(["L1", "L2"]) + + result = df1.add(df2, level=level) + expected = DataFrame({("A", "C"): [0, 1, 2], ("A", "D"): [0, 1, 2]}) + expected.columns = expected.columns.set_names(["L1", "L2"]) + + tm.assert_frame_equal(result, expected) + class TestFrameArithmetic: def test_td64_op_nat_casting(self):
- [x] closes #34388 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/44094
2021-10-19T02:20:52Z
2021-11-29T00:59:13Z
2021-11-29T00:59:12Z
2021-11-29T00:59:16Z
ENH: retain RangeIndex in RangeIndex.difference
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index f1054635f44db..4def2e4b93553 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -675,6 +675,9 @@ def _difference(self, other, sort=None): if not isinstance(other, RangeIndex): return super()._difference(other, sort=sort) + if sort is None and self.step < 0: + return self[::-1]._difference(other) + res_name = ops.get_op_result_name(self, other) first = self._range[::-1] if self.step < 0 else self._range @@ -683,36 +686,60 @@ def _difference(self, other, sort=None): overlap = overlap[::-1] if len(overlap) == 0: - result = self.rename(name=res_name) - if sort is None and self.step < 0: - result = result[::-1] - return result + return self.rename(name=res_name) if len(overlap) == len(self): return self[:0].rename(res_name) - if not isinstance(overlap, RangeIndex): - # We won't end up with RangeIndex, so fall back - return super()._difference(other, sort=sort) - if overlap.step != first.step: - # In some cases we might be able to get a RangeIndex back, - # but not worth the effort. 
- return super()._difference(other, sort=sort) - if overlap[0] == first.start: - # The difference is everything after the intersection - new_rng = range(overlap[-1] + first.step, first.stop, first.step) - elif overlap[-1] == first[-1]: - # The difference is everything before the intersection - new_rng = range(first.start, overlap[0], first.step) + # overlap.step will always be a multiple of self.step (see _intersection) + + if len(overlap) == 1: + if overlap[0] == self[0]: + return self[1:] + + elif overlap[0] == self[-1]: + return self[:-1] + + elif len(self) == 3 and overlap[0] == self[1]: + return self[::2] + + else: + return super()._difference(other, sort=sort) + + if overlap.step == first.step: + if overlap[0] == first.start: + # The difference is everything after the intersection + new_rng = range(overlap[-1] + first.step, first.stop, first.step) + elif overlap[-1] == first[-1]: + # The difference is everything before the intersection + new_rng = range(first.start, overlap[0], first.step) + else: + # The difference is not range-like + # e.g. range(1, 10, 1) and range(3, 7, 1) + return super()._difference(other, sort=sort) + else: - # The difference is not range-like + # We must have len(self) > 1, bc we ruled out above + # len(overlap) == 0 and len(overlap) == len(self) + assert len(self) > 1 + + if overlap.step == first.step * 2: + if overlap[0] == first[0] and overlap[-1] in (first[-1], first[-2]): + # e.g. range(1, 10, 1) and range(1, 10, 2) + return self[1::2] + + elif overlap[0] == first[1] and overlap[-1] in (first[-1], first[-2]): + # e.g. range(1, 10, 1) and range(2, 10, 2) + return self[::2] + + # We can get here with e.g. range(20) and range(0, 10, 2) + + # e.g. 
range(10) and range(0, 10, 3) return super()._difference(other, sort=sort) new_index = type(self)._simple_new(new_rng, name=res_name) if first is not self._range: new_index = new_index[::-1] - if sort is None and new_index.step < 0: - new_index = new_index[::-1] return new_index def symmetric_difference(self, other, result_name: Hashable = None, sort=None): diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py index 53ea11345328c..583391bd96a85 100644 --- a/pandas/tests/indexes/ranges/test_setops.py +++ b/pandas/tests/indexes/ranges/test_setops.py @@ -3,6 +3,11 @@ timedelta, ) +from hypothesis import ( + assume, + given, + strategies as st, +) import numpy as np import pytest @@ -359,11 +364,44 @@ def test_difference_mismatched_step(self): obj = RangeIndex.from_range(range(1, 10), name="foo") result = obj.difference(obj[::2]) - expected = Int64Index(obj[1::2]._values, name=obj.name) + expected = obj[1::2] tm.assert_index_equal(result, expected, exact=True) + result = obj[::-1].difference(obj[::2], sort=False) + tm.assert_index_equal(result, expected[::-1], exact=True) + result = obj.difference(obj[1::2]) - expected = Int64Index(obj[::2]._values, name=obj.name) + expected = obj[::2] + tm.assert_index_equal(result, expected, exact=True) + + result = obj[::-1].difference(obj[1::2], sort=False) + tm.assert_index_equal(result, expected[::-1], exact=True) + + def test_difference_interior_non_preserving(self): + # case with intersection of length 1 but RangeIndex is not preserved + idx = Index(range(10)) + + other = idx[3:4] + result = idx.difference(other) + expected = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 9]) + tm.assert_index_equal(result, expected, exact=True) + + # case with other.step / self.step > 2 + other = idx[::3] + result = idx.difference(other) + expected = Int64Index([1, 2, 4, 5, 7, 8]) + tm.assert_index_equal(result, expected, exact=True) + + # cases with only reaching one end of left + obj = Index(range(20)) + 
other = obj[:10:2] + result = obj.difference(other) + expected = Int64Index([1, 3, 5, 7, 9] + list(range(10, 20))) + tm.assert_index_equal(result, expected, exact=True) + + other = obj[1:11:2] + result = obj.difference(other) + expected = Int64Index([0, 2, 4, 6, 8, 10] + list(range(11, 20))) tm.assert_index_equal(result, expected, exact=True) def test_symmetric_difference(self): @@ -391,3 +429,44 @@ def test_symmetric_difference(self): result = left.symmetric_difference(right[1:]) expected = Int64Index([1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14]) tm.assert_index_equal(result, expected) + + +def assert_range_or_not_is_rangelike(index): + """ + Check that we either have a RangeIndex or that this index *cannot* + be represented as a RangeIndex. + """ + if not isinstance(index, RangeIndex) and len(index) > 0: + diff = index[:-1] - index[1:] + assert not (diff == diff[0]).all() + + +@given( + st.integers(-20, 20), + st.integers(-20, 20), + st.integers(-20, 20), + st.integers(-20, 20), + st.integers(-20, 20), + st.integers(-20, 20), +) +def test_range_difference(start1, stop1, step1, start2, stop2, step2): + # test that + # a) we match Int64Index.difference and + # b) we return RangeIndex whenever it is possible to do so. + assume(step1 != 0) + assume(step2 != 0) + + left = RangeIndex(start1, stop1, step1) + right = RangeIndex(start2, stop2, step2) + + result = left.difference(right, sort=None) + assert_range_or_not_is_rangelike(result) + + alt = Int64Index(left).difference(Int64Index(right), sort=None) + tm.assert_index_equal(result, alt, exact="equiv") + + result = left.difference(right, sort=False) + assert_range_or_not_is_rangelike(result) + + alt = Int64Index(left).difference(Int64Index(right), sort=False) + tm.assert_index_equal(result, alt, exact="equiv")
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry This should handle _all_ cases where it is possible to retain RangeIndex, a claim which the hypothesis-baed tests should check.
https://api.github.com/repos/pandas-dev/pandas/pulls/44093
2021-10-19T01:50:12Z
2021-10-19T12:55:47Z
2021-10-19T12:55:47Z
2021-10-19T14:42:53Z
TST: fix groupby-empty xfails
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 8a893db95dc22..3b54918ae99c1 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1536,6 +1536,7 @@ def func(df): result = [index[i] if i >= 0 else np.nan for i in indices] return df._constructor_sliced(result, index=res.index) + func.__name__ = "idxmax" return self._python_apply_general(func, self._obj_with_exclusions) @Appender(DataFrame.idxmin.__doc__) @@ -1557,6 +1558,7 @@ def func(df): result = [index[i] if i >= 0 else np.nan for i in indices] return df._constructor_sliced(result, index=res.index) + func.__name__ = "idxmin" return self._python_apply_general(func, self._obj_with_exclusions) boxplot = boxplot_frame_groupby diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 46e4465667e7e..60c8851f059fe 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -753,6 +753,18 @@ def apply( mutated = True result_values.append(res) + # getattr pattern for __name__ is needed for functools.partial objects + if len(group_keys) == 0 and getattr(f, "__name__", None) not in [ + "idxmin", + "idxmax", + "nanargmin", + "nanargmax", + ]: + # If group_keys is empty, then no function calls have been made, + # so we will not have raised even if this is an invalid dtype. + # So do one dummy call here to raise appropriate TypeError. 
+ f(data.iloc[:0]) + return result_values, mutated @cache_readonly diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 83b096cfc2d05..203d8abb465d0 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -24,6 +24,11 @@ to_datetime, ) import pandas._testing as tm +from pandas.core.arrays import ( + BooleanArray, + FloatingArray, + IntegerArray, +) from pandas.core.base import SpecificationError import pandas.core.common as com @@ -1822,17 +1827,23 @@ def test_pivot_table_values_key_error(): ) @pytest.mark.filterwarnings("ignore:Dropping invalid columns:FutureWarning") @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") -def test_empty_groupby(columns, keys, values, method, op, request): +def test_empty_groupby(columns, keys, values, method, op, request, using_array_manager): # GH8093 & GH26411 override_dtype = None if ( isinstance(values, Categorical) and not isinstance(columns, list) - and op in ["sum", "prod"] + and op in ["sum", "prod", "skew", "mad"] ): # handled below GH#41291 - pass + + if using_array_manager and op == "mad": + right_msg = "Cannot interpret 'CategoricalDtype.* as a data type" + msg = "Regex pattern \"'Categorical' does not implement.*" + right_msg + mark = pytest.mark.xfail(raises=AssertionError, match=msg) + request.node.add_marker(mark) + elif ( isinstance(values, Categorical) and len(keys) == 1 @@ -1851,11 +1862,7 @@ def test_empty_groupby(columns, keys, values, method, op, request): raises=TypeError, match="'Categorical' does not implement" ) request.node.add_marker(mark) - elif ( - isinstance(values, Categorical) - and len(keys) == 1 - and op in ["mad", "min", "max", "sum", "prod", "skew"] - ): + elif isinstance(values, Categorical) and len(keys) == 1 and op in ["sum", "prod"]: mark = pytest.mark.xfail( raises=AssertionError, match="(DataFrame|Series) are different" ) @@ -1869,7 +1876,30 @@ def test_empty_groupby(columns, keys, values, 
method, op, request): raises=AssertionError, match="(DataFrame|Series) are different" ) request.node.add_marker(mark) - elif isinstance(values, pd.core.arrays.BooleanArray) and op in ["sum", "prod"]: + elif ( + isinstance(values, (IntegerArray, FloatingArray)) + and op == "mad" + and isinstance(columns, list) + ): + mark = pytest.mark.xfail( + raises=TypeError, match="can only perform ops with numeric values" + ) + request.node.add_marker(mark) + + elif ( + op == "mad" + and not isinstance(columns, list) + and isinstance(values, pd.DatetimeIndex) + and values.tz is not None + and using_array_manager + ): + mark = pytest.mark.xfail( + raises=TypeError, + match=r"Cannot interpret 'datetime64\[ns, US/Eastern\]' as a data type", + ) + request.node.add_marker(mark) + + elif isinstance(values, BooleanArray) and op in ["sum", "prod"]: # We expect to get Int64 back for these override_dtype = "Int64" @@ -1895,19 +1925,29 @@ def get_result(): if columns == "C": # i.e. SeriesGroupBy - if op in ["prod", "sum"]: + if op in ["prod", "sum", "skew"]: # ops that require more than just ordered-ness if df.dtypes[0].kind == "M": # GH#41291 # datetime64 -> prod and sum are invalid - msg = "datetime64 type does not support" + if op == "skew": + msg = "'DatetimeArray' does not implement reduction 'skew'" + else: + msg = "datetime64 type does not support" with pytest.raises(TypeError, match=msg): get_result() return - elif isinstance(values, Categorical): + if op in ["prod", "sum", "skew", "mad"]: + if isinstance(values, Categorical): # GH#41291 - msg = "category type does not support" + if op == "mad": + # mad calls mean, which Categorical doesn't implement + msg = "'Categorical' does not implement reduction 'mean'" + elif op == "skew": + msg = f"'Categorical' does not implement reduction '{op}'" + else: + msg = "category type does not support" with pytest.raises(TypeError, match=msg): get_result() @@ -1954,6 +1994,34 @@ def get_result(): tm.assert_equal(result, expected) return + if ( + 
op in ["mad", "min", "max", "skew"] + and isinstance(values, Categorical) + and len(keys) == 1 + ): + # Categorical doesn't implement, so with numeric_only=True + # these are dropped and we get an empty DataFrame back + result = get_result() + expected = df.set_index(keys)[[]] + + # with numeric_only=True, these are dropped, and we get + # an empty DataFrame back + if len(keys) != 1: + # Categorical is special without 'observed=True' + lev = Categorical([0], dtype=values.dtype) + mi = MultiIndex.from_product([lev, lev], names=keys) + expected = DataFrame([], columns=[], index=mi) + else: + # all columns are dropped, but we end up with one row + # Categorical is special without 'observed=True' + lev = Categorical([0], dtype=values.dtype) + ci = Index(lev, name=keys[0]) + expected = DataFrame([], columns=[], index=ci) + # expected = df.set_index(keys)[columns] + + tm.assert_equal(result, expected) + return + result = get_result() expected = df.set_index(keys)[columns] if override_dtype is not None:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44092
2021-10-18T21:04:33Z
2021-10-20T21:09:05Z
2021-10-20T21:09:05Z
2021-10-20T21:51:21Z
BUG: crosstab fails with lists/tuples
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index d6ad5eb2003ce..a78fe1d83cac2 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -573,6 +573,7 @@ Reshaping - Bug in :func:`concat` would fail when the ``objs`` argument all had the same index and the ``keys`` argument contained duplicates (:issue:`43595`) - Bug in :func:`concat` which ignored the ``sort`` parameter (:issue:`43375`) - Fixed bug in :func:`merge` with :class:`MultiIndex` as column index for the ``on`` argument returning an error when assigning a column internally (:issue:`43734`) +- Bug in :func:`crosstab` would fail when inputs are lists or tuples (:issue:`44076`) Sparse ^^^^^^ diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index fcf00276aa8af..edd3599aabe35 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -25,6 +25,7 @@ from pandas.core.dtypes.common import ( is_integer_dtype, is_list_like, + is_nested_list_like, is_scalar, ) from pandas.core.dtypes.generic import ( @@ -625,8 +626,10 @@ def crosstab( if values is not None and aggfunc is None: raise ValueError("values cannot be used without an aggfunc.") - index = com.maybe_make_list(index) - columns = com.maybe_make_list(columns) + if not is_nested_list_like(index): + index = [index] + if not is_nested_list_like(columns): + columns = [columns] common_idx = None pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))] diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py index f252b5e1ceedf..74beda01e4b8a 100644 --- a/pandas/tests/reshape/test_crosstab.py +++ b/pandas/tests/reshape/test_crosstab.py @@ -84,10 +84,12 @@ def test_crosstab_multiple(self): expected = expected.unstack("A").fillna(0).astype(np.int64) tm.assert_frame_equal(result, expected) - def test_crosstab_ndarray(self): - a = np.random.randint(0, 5, size=100) - b = np.random.randint(0, 3, size=100) - c 
= np.random.randint(0, 10, size=100) + @pytest.mark.parametrize("box", [np.array, list, tuple]) + def test_crosstab_ndarray(self, box): + # GH 44076 + a = box(np.random.randint(0, 5, size=100)) + b = box(np.random.randint(0, 3, size=100)) + c = box(np.random.randint(0, 10, size=100)) df = DataFrame({"a": a, "b": b, "c": c}) @@ -100,9 +102,11 @@ def test_crosstab_ndarray(self): tm.assert_frame_equal(result, expected) # assign arbitrary names - result = crosstab(self.df["A"].values, self.df["C"].values) - assert result.index.name == "row_0" - assert result.columns.name == "col_0" + result = crosstab(a, c) + expected = crosstab(df["a"], df["c"]) + expected.index.names = ["row_0"] + expected.columns.names = ["col_0"] + tm.assert_frame_equal(result, expected) def test_crosstab_non_aligned(self): # GH 17005
- [x] closes #44076 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44088
2021-10-18T17:51:45Z
2021-10-18T19:28:50Z
2021-10-18T19:28:50Z
2021-10-19T03:45:30Z
ENH: preserve RangeIndex in insert, delete
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 9eb086ed97180..487a1880caff5 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -15,7 +15,10 @@ import numpy as np -from pandas._libs import index as libindex +from pandas._libs import ( + index as libindex, + lib, +) from pandas._libs.lib import no_default from pandas._typing import ( Dtype, @@ -719,9 +722,41 @@ def symmetric_difference(self, other, result_name: Hashable = None, sort=None): # -------------------------------------------------------------------- + # error: Return type "Index" of "delete" incompatible with return type + # "RangeIndex" in supertype "Index" + def delete(self, loc) -> Index: # type: ignore[override] + # In some cases we can retain RangeIndex, see also + # DatetimeTimedeltaMixin._get_delete_Freq + if is_integer(loc): + if loc == 0 or loc == -len(self): + return self[1:] + if loc == -1 or loc == len(self) - 1: + return self[:-1] + + elif lib.is_list_like(loc): + slc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self)) + if isinstance(slc, slice) and slc.step is not None and slc.step < 0: + rng = range(len(self))[slc][::-1] + slc = slice(rng.start, rng.stop, rng.step) + + if isinstance(slc, slice) and slc.step in [1, None]: + # Note: maybe_indices_to_slice will never return a slice + # with 'slc.start is None'; may have slc.stop None in cases + # with negative step + if slc.start == 0: + return self[slc.stop :] + elif slc.stop in [len(self), None]: + return self[: slc.start] + + # TODO: more generally, self.difference(self[slc]), + # once _difference is better about retaining RangeIndex + + return super().delete(loc) + def insert(self, loc: int, item) -> Index: if len(self) and (is_integer(item) or is_float(item)): - # We can retain RangeIndex is inserting at the beginning or end + # We can retain RangeIndex is inserting at the beginning or end, + # or right in the middle. 
rng = self._range if loc == 0 and item == self[0] - self.step: new_rng = range(rng.start - rng.step, rng.stop, rng.step) @@ -731,6 +766,12 @@ def insert(self, loc: int, item) -> Index: new_rng = range(rng.start, rng.stop + rng.step, rng.step) return type(self)._simple_new(new_rng, name=self.name) + elif len(self) == 2 and item == self[0] + self.step / 2: + # e.g. inserting 1 into [0, 2] + step = int(self.step / 2) + new_rng = range(self.start, self.stop, step) + return type(self)._simple_new(new_rng, name=self.name) + return super().insert(loc, item) def _concat(self, indexes: list[Index], name: Hashable) -> Index: diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index 9732c0faf9efd..7591620de168a 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -127,18 +127,40 @@ def test_insert(self): expected = Index([0, pd.NaT, 1, 2, 3, 4], dtype=object) tm.assert_index_equal(result, expected) + def test_insert_edges_preserves_rangeindex(self): + idx = Index(range(4, 9, 2)) + + result = idx.insert(0, 2) + expected = Index(range(2, 9, 2)) + tm.assert_index_equal(result, expected, exact=True) + + result = idx.insert(3, 10) + expected = Index(range(4, 11, 2)) + tm.assert_index_equal(result, expected, exact=True) + + def test_insert_middle_preserves_rangeindex(self): + # insert in the middle + idx = Index(range(0, 3, 2)) + result = idx.insert(1, 1) + expected = Index(range(3)) + tm.assert_index_equal(result, expected, exact=True) + + idx = idx * 2 + result = idx.insert(1, 2) + expected = expected * 2 + tm.assert_index_equal(result, expected, exact=True) + def test_delete(self): idx = RangeIndex(5, name="Foo") - expected = idx[1:].astype(int) + expected = idx[1:] result = idx.delete(0) - # TODO: could preserve RangeIndex at the ends - tm.assert_index_equal(result, expected, exact="equiv") + tm.assert_index_equal(result, expected, exact=True) assert result.name == expected.name 
- expected = idx[:-1].astype(int) + expected = idx[:-1] result = idx.delete(-1) - tm.assert_index_equal(result, expected, exact="equiv") + tm.assert_index_equal(result, expected, exact=True) assert result.name == expected.name msg = "index 5 is out of bounds for axis 0 with size 5" @@ -146,6 +168,60 @@ def test_delete(self): # either depending on numpy version result = idx.delete(len(idx)) + def test_delete_preserves_rangeindex(self): + idx = Index(range(2), name="foo") + + result = idx.delete([1]) + expected = Index(range(1), name="foo") + tm.assert_index_equal(result, expected, exact=True) + + result = idx.delete(1) + tm.assert_index_equal(result, expected, exact=True) + + def test_delete_preserves_rangeindex_list_at_end(self): + idx = RangeIndex(0, 6, 1) + + loc = [2, 3, 4, 5] + result = idx.delete(loc) + expected = idx[:2] + tm.assert_index_equal(result, expected, exact=True) + + result = idx.delete(loc[::-1]) + tm.assert_index_equal(result, expected, exact=True) + + def test_delete_preserves_rangeindex_list_middle(self): + idx = RangeIndex(0, 6, 1) + + loc = [1, 2, 3, 4] + result = idx.delete(loc) + expected = RangeIndex(0, 6, 5) + tm.assert_index_equal(result, expected, exact="equiv") # TODO: retain! + + result = idx.delete(loc[::-1]) + tm.assert_index_equal(result, expected, exact="equiv") # TODO: retain! 
+ + def test_delete_all_preserves_rangeindex(self): + idx = RangeIndex(0, 6, 1) + + loc = [0, 1, 2, 3, 4, 5] + result = idx.delete(loc) + expected = idx[:0] + tm.assert_index_equal(result, expected, exact=True) + + result = idx.delete(loc[::-1]) + tm.assert_index_equal(result, expected, exact=True) + + def test_delete_not_preserving_rangeindex(self): + idx = RangeIndex(0, 6, 1) + + loc = [0, 3, 5] + result = idx.delete(loc) + expected = Int64Index([1, 2, 4]) + tm.assert_index_equal(result, expected, exact=True) + + result = idx.delete(loc[::-1]) + tm.assert_index_equal(result, expected, exact=True) + def test_view(self): i = RangeIndex(0, name="Foo") i_view = i.view()
- [ ] closes #xxxx - [x] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44086
2021-10-18T16:36:25Z
2021-10-18T23:05:28Z
2021-10-18T23:05:28Z
2021-10-18T23:15:52Z
BUG: RangeIndex.difference with sort=None and step<0
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index d6ad5eb2003ce..eee80d10dcb47 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -604,6 +604,7 @@ Other ^^^^^ - Bug in :meth:`CustomBusinessMonthBegin.__add__` (:meth:`CustomBusinessMonthEnd.__add__`) not applying the extra ``offset`` parameter when beginning (end) of the target month is already a business day (:issue:`41356`) - Bug in :meth:`RangeIndex.union` with another ``RangeIndex`` with matching (even) ``step`` and starts differing by strictly less than ``step / 2`` (:issue:`44019`) +- Bug in :meth:`RangeIndex.difference` with ``sort=None`` and ``step<0`` failing to sort (:issue:`44085`) .. ***DO NOT USE THIS SECTION*** diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 36997481b82dd..70c6410e441a1 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -680,7 +680,10 @@ def _difference(self, other, sort=None): overlap = overlap[::-1] if len(overlap) == 0: - return self.rename(name=res_name) + result = self.rename(name=res_name) + if sort is None and self.step < 0: + result = result[::-1] + return result if len(overlap) == len(self): return self[:0].rename(res_name) if not isinstance(overlap, RangeIndex): @@ -704,6 +707,9 @@ def _difference(self, other, sort=None): new_index = type(self)._simple_new(new_rng, name=res_name) if first is not self._range: new_index = new_index[::-1] + + if sort is None and new_index.step < 0: + new_index = new_index[::-1] return new_index def symmetric_difference(self, other, result_name: Hashable = None, sort=None): diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py index 07ec70c109f67..53ea11345328c 100644 --- a/pandas/tests/indexes/ranges/test_setops.py +++ b/pandas/tests/indexes/ranges/test_setops.py @@ -317,16 +317,44 @@ def test_difference(self): result = obj.difference(obj[-3:]) tm.assert_index_equal(result, 
obj[:-3], exact=True) + # Flipping the step of 'other' doesn't affect the result, but + # flipping the stepof 'self' does when sort=None result = obj[::-1].difference(obj[-3:]) + tm.assert_index_equal(result, obj[:-3], exact=True) + + result = obj[::-1].difference(obj[-3:], sort=False) tm.assert_index_equal(result, obj[:-3][::-1], exact=True) result = obj[::-1].difference(obj[-3:][::-1]) + tm.assert_index_equal(result, obj[:-3], exact=True) + + result = obj[::-1].difference(obj[-3:][::-1], sort=False) tm.assert_index_equal(result, obj[:-3][::-1], exact=True) result = obj.difference(obj[2:6]) expected = Int64Index([1, 2, 7, 8, 9], name="foo") tm.assert_index_equal(result, expected) + def test_difference_sort(self): + # GH#44085 ensure we respect the sort keyword + + idx = Index(range(4))[::-1] + other = Index(range(3, 4)) + + result = idx.difference(other) + expected = Index(range(3)) + tm.assert_index_equal(result, expected, exact=True) + + result = idx.difference(other, sort=False) + expected = expected[::-1] + tm.assert_index_equal(result, expected, exact=True) + + # case where the intersection is empty + other = range(10, 12) + result = idx.difference(other, sort=None) + expected = idx[::-1] + tm.assert_index_equal(result, expected, exact=True) + def test_difference_mismatched_step(self): obj = RangeIndex.from_range(range(1, 10), name="foo")
- [ ] closes #xxxx - [x] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44085
2021-10-18T16:08:00Z
2021-10-18T22:45:22Z
2021-10-18T22:45:22Z
2021-11-20T23:20:36Z
DOC: Fix cheatsheet automatic uploading
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 20f7712131ba4..23e452f682b60 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -136,6 +136,9 @@ jobs: echo "${{ secrets.server_ip }} ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE1Kkopomm7FHG5enATf7SgnpICZ4W2bw+Ho+afqin+w7sMcrsa0je7sbztFAV8YchDkiBKnWTG4cRT+KZgZCaY=" > ~/.ssh/known_hosts if: ${{github.event_name == 'push' && github.ref == 'refs/heads/master'}} + - name: Copy cheatsheets into site directory + run: cp doc/cheatsheet/Pandas_Cheat_Sheet* web/build/ + - name: Upload web run: rsync -az --delete --exclude='pandas-docs' --exclude='docs' web/build/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas if: ${{github.event_name == 'push' && github.ref == 'refs/heads/master'}} @@ -147,9 +150,6 @@ jobs: - name: Move docs into site directory run: mv doc/build/html web/build/docs - - name: Copy cheatsheets into site directory - run: cp doc/cheatsheet/Pandas_Cheat_Sheet* web/build/ - - name: Save website as an artifact uses: actions/upload-artifact@v2 with:
Follow up of #44018. I didn't realize before merging that we were first uploading the web directory, and then copying the cheatsheets to it, so they won't be uploaded. The cheatsheets will be removed from the website in the CI of hte previous PR, so would be good to merge this asap, so the cheatsheets are restored.
https://api.github.com/repos/pandas-dev/pandas/pulls/44083
2021-10-18T15:32:24Z
2021-10-18T15:34:17Z
2021-10-18T15:34:16Z
2021-10-18T17:53:54Z
BUG: to_datetime with xarray DataArray and specifie unit errors
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 964f4b83866c9..4465875fc9ae6 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -467,6 +467,7 @@ Conversion ^^^^^^^^^^ - Bug in :class:`UInt64Index` constructor when passing a list containing both positive integers small enough to cast to int64 and integers too large too hold in int64 (:issue:`42201`) - Bug in :class:`Series` constructor returning 0 for missing values with dtype ``int64`` and ``False`` for dtype ``bool`` (:issue:`43017`, :issue:`43018`) +- Bug in :func:`to_datetime` with ``arg:xr.DataArray`` and ``unit="ns"`` specified raises TypeError (:issue:`44053`) - Strings diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 8b37026e16171..10be942a49d15 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -325,7 +325,6 @@ def _convert_listlike_datetimes( ------- Index-like of parsed dates """ - if isinstance(arg, (list, tuple)): arg = np.array(arg, dtype="O") @@ -525,6 +524,7 @@ def _to_datetime_with_unit(arg, unit, name, tz, errors: str) -> Index: arr = arg.astype(f"datetime64[{unit}]") tz_parsed = None else: + arg = np.asarray(arg) arr, tz_parsed = tslib.array_with_unit_to_datetime(arg, unit, errors=errors) if errors == "ignore": diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 850ce6df21b7f..3fa6441e47242 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -2640,6 +2640,25 @@ def test_empty_string_datetime_coerce__unit(): tm.assert_index_equal(expected, result) +@td.skip_if_no("xarray") +def test_xarray_coerce_unit(): + # GH44053 + import xarray as xr + + arr = xr.DataArray([1, 2, 3]) + result = to_datetime(arr, unit="ns") + expected = DatetimeIndex( + [ + "1970-01-01 00:00:00.000000001", + "1970-01-01 00:00:00.000000002", + "1970-01-01 00:00:00.000000003", + ], + 
dtype="datetime64[ns]", + freq=None, + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_monotonic_increasing_index(cache): # GH28238
- [x] closes #44053 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] coerce to numpy array before calling ```tslib.array_with_unit_to_datetime```function Let me know if any change need to be done. Let me know if you need to make any changes. I love the package hope I can start helping more :)
https://api.github.com/repos/pandas-dev/pandas/pulls/44074
2021-10-18T00:20:51Z
2021-10-19T01:43:32Z
2021-10-19T01:43:31Z
2021-10-19T01:43:36Z
Backport PR #44058 on branch 1.3.x (DOC: Start v1.3.5 release notes)
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index e05bf9621e2c4..381df50641291 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -16,6 +16,7 @@ Version 1.3 .. toctree:: :maxdepth: 2 + v1.3.5 v1.3.4 v1.3.3 v1.3.2 diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst index 22e15ed9f5d71..b46744d51d74d 100644 --- a/doc/source/whatsnew/v1.3.4.rst +++ b/doc/source/whatsnew/v1.3.4.rst @@ -54,4 +54,4 @@ Other Contributors ~~~~~~~~~~~~ -.. contributors:: v1.3.3..v1.3.4|HEAD +.. contributors:: v1.3.3..v1.3.4 diff --git a/doc/source/whatsnew/v1.3.5.rst b/doc/source/whatsnew/v1.3.5.rst new file mode 100644 index 0000000000000..0f1997de2166a --- /dev/null +++ b/doc/source/whatsnew/v1.3.5.rst @@ -0,0 +1,45 @@ +.. _whatsnew_135: + +What's new in 1.3.5 (November ??, 2021) +--------------------------------------- + +These are the changes in pandas 1.3.5. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_135.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_135.bug_fixes: + +Bug fixes +~~~~~~~~~ +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_135.other: + +Other +~~~~~ +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_135.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.3.4..v1.3.5|HEAD
Backport PR #44058: DOC: Start v1.3.5 release notes
https://api.github.com/repos/pandas-dev/pandas/pulls/44072
2021-10-17T17:57:15Z
2021-10-17T19:31:49Z
2021-10-17T19:31:49Z
2021-10-17T19:31:50Z
REF: share Index subclass formatting code
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 0cfd0e970a44c..7a42a37e6f6d1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1247,11 +1247,11 @@ def _format_data(self, name=None) -> str_t: line_break_each_value=self._is_multi, ) - def _format_attrs(self) -> list[tuple[str_t, str_t | int]]: + def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: """ Return a list of tuples of the (attr,formatted_value). """ - attrs: list[tuple[str_t, str_t | int]] = [] + attrs: list[tuple[str_t, str_t | int | bool | None]] = [] if not self._is_multi: attrs.append(("dtype", f"'{self.dtype}'")) @@ -1295,9 +1295,7 @@ def format( return self._format_with_header(header, na_rep=na_rep) - def _format_with_header( - self, header: list[str_t], na_rep: str_t = "NaN" - ) -> list[str_t]: + def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]: from pandas.io.formats.format import format_array values = self._values @@ -1387,9 +1385,16 @@ def _summary(self, name=None) -> str_t: head = self[0] if hasattr(head, "format") and not isinstance(head, str): head = head.format() + elif needs_i8_conversion(self.dtype): + # e.g. Timedelta, display as values, not quoted + head = self._formatter_func(head).replace("'", "") tail = self[-1] if hasattr(tail, "format") and not isinstance(tail, str): tail = tail.format() + elif needs_i8_conversion(self.dtype): + # e.g. 
Timedelta, display as values, not quoted + tail = self._formatter_func(tail).replace("'", "") + index_summary = f", {head} to {tail}" else: index_summary = "" diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 02bbfe69be1b8..bd76f214e0261 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -44,6 +44,8 @@ inherit_names, ) +from pandas.io.formats.printing import pprint_thing + _index_doc_kwargs: dict[str, str] = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update({"target_klass": "CategoricalIndex"}) @@ -180,6 +182,7 @@ def _can_hold_strings(self): codes: np.ndarray categories: Index + ordered: bool | None _data: Categorical _values: Categorical @@ -342,20 +345,18 @@ def _format_attrs(self): if get_option("display.max_categories") == 0 else get_option("display.max_categories") ) + attrs: list[tuple[str, str | int | bool | None]] attrs = [ ( "categories", ibase.default_pprint(self.categories, max_seq_items=max_categories), ), - # error: "CategoricalIndex" has no attribute "ordered" - ("ordered", self.ordered), # type: ignore[attr-defined] + ("ordered", self.ordered), ] extra = super()._format_attrs() return attrs + extra - def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]: - from pandas.io.formats.printing import pprint_thing - + def _format_with_header(self, header: list[str], na_rep: str) -> list[str]: result = [ pprint_thing(x, escape_chars=("\t", "\r", "\n")) if notna(x) else na_rep for x in self._values diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 4cff33f96de27..b5c68fb7ada54 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -188,6 +188,7 @@ def format( def _format_with_header( self, header: list[str], na_rep: str = "NaT", date_format: str | None = None ) -> list[str]: + # matches base class except for whitespace padding and date_format return header + list( 
self._format_native_types(na_rep=na_rep, date_format=date_format) ) @@ -207,39 +208,15 @@ def _format_attrs(self): freq = self.freqstr if freq is not None: freq = repr(freq) # e.g. D -> 'D' - # Argument 1 to "append" of "list" has incompatible type - # "Tuple[str, Optional[str]]"; expected "Tuple[str, Union[str, int]]" - attrs.append(("freq", freq)) # type: ignore[arg-type] + attrs.append(("freq", freq)) return attrs + @Appender(Index._summary.__doc__) def _summary(self, name=None) -> str: - """ - Return a summarized representation. - - Parameters - ---------- - name : str - Name to use in the summary representation. - - Returns - ------- - str - Summarized representation of the index. - """ - formatter = self._formatter_func - if len(self) > 0: - index_summary = f", {formatter(self[0])} to {formatter(self[-1])}" - else: - index_summary = "" - - if name is None: - name = type(self).__name__ - result = f"{name}: {len(self)} entries{index_summary}" + result = super()._summary(name=name) if self.freq: result += f"\nFreq: {self.freqstr}" - # display as values, not quoted - result = result.replace("'", "") return result # -------------------------------------------------------------------- diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 165048e2a591a..72398ab9d43f6 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -800,7 +800,8 @@ def length(self) -> Index: # Rendering Methods # __repr__ associated methods are based on MultiIndex - def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]: + def _format_with_header(self, header: list[str], na_rep: str) -> list[str]: + # matches base class except for whitespace padding return header + list(self._format_native_types(na_rep=na_rep)) def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs): diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 9eb086ed97180..36997481b82dd 100644 --- 
a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -214,7 +214,8 @@ def _format_data(self, name=None): # we are formatting thru the attributes return None - def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]: + def _format_with_header(self, header: list[str], na_rep: str) -> list[str]: + # Equivalent to Index implementation, but faster if not len(self._range): return header first_val_str = str(self._range[0])
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44070
2021-10-17T16:45:31Z
2021-10-17T22:00:51Z
2021-10-17T22:00:51Z
2021-10-17T23:56:10Z
BUG: 43909 - check monoticity of rolling groupby
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index ffd32e263aa50..6baed863476a9 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -809,6 +809,7 @@ Groupby/resample/rolling - Bug in :meth:`GroupBy.nth` failing on ``axis=1`` (:issue:`43926`) - Fixed bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` not respecting right bound on centered datetime-like windows, if the index contain duplicates (:issue:`3944`) - Bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` when using a :class:`pandas.api.indexers.BaseIndexer` subclass that returned unequal start and end arrays would segfault instead of raising a ``ValueError`` (:issue:`44470`) +- Bug in :meth:`Groupby.rolling` when non-monotonic data passed, fails to correctly raise ``ValueError`` (:issue:`43909`) - Fixed bug where grouping by a :class:`Series` that has a categorical data type and length unequal to the axis of grouping raised ``ValueError`` (:issue:`44179`) Reshaping diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index defae3392bfce..49d696f461300 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2626,8 +2626,9 @@ def _get_window_indexer(self) -> GroupbyIndexer: def _validate_monotonic(self): """ Validate that on is monotonic; - in this case we have to check only for nans, because - monotonicity was already validated at a higher level. 
""" - if self._on.hasnans: + if ( + not (self._on.is_monotonic_increasing or self._on.is_monotonic_decreasing) + or self._on.hasnans + ): self._raise_monotonic_error() diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index b60f2e60e1035..814bd6b998182 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -1419,6 +1419,18 @@ def test_groupby_rolling_nan_included(): tm.assert_frame_equal(result, expected) +def test_groupby_rolling_non_monotonic(): + # GH 43909 + + shuffled = [3, 0, 1, 2] + sec = 1_000 + df = DataFrame( + [{"t": Timestamp(2 * x * sec), "x": x + 1, "c": 42} for x in shuffled] + ) + with pytest.raises(ValueError, match=r".* must be monotonic"): + df.groupby("c").rolling(on="t", window="3s") + + @pytest.mark.parametrize("method", ["skew", "kurt"]) def test_rolling_skew_kurt_numerical_stability(method): # GH#6929 diff --git a/pandas/tests/window/test_timeseries_window.py b/pandas/tests/window/test_timeseries_window.py index 7cd319480083b..f2cf7bd47e15b 100644 --- a/pandas/tests/window/test_timeseries_window.py +++ b/pandas/tests/window/test_timeseries_window.py @@ -648,6 +648,9 @@ def test_groupby_monotonic(self): # GH 15130 # we don't need to validate monotonicity when grouping + # GH 43909 we should raise an error here to match + # behaviour of non-groupby rolling. 
+ data = [ ["David", "1/1/2015", 100], ["David", "1/5/2015", 500], @@ -663,6 +666,7 @@ def test_groupby_monotonic(self): df = DataFrame(data=data, columns=["name", "date", "amount"]) df["date"] = to_datetime(df["date"]) + df = df.sort_values("date") expected = ( df.set_index("date") @@ -672,9 +676,11 @@ def test_groupby_monotonic(self): result = df.groupby("name").rolling("180D", on="date")["amount"].sum() tm.assert_series_equal(result, expected) - def test_non_monotonic(self): + def test_non_monotonic_raises(self): # GH 13966 (similar to #15130, closed by #15175) + # superseded by 43909 + dates = date_range(start="2016-01-01 09:30:00", periods=20, freq="s") df = DataFrame( { @@ -684,11 +690,13 @@ def test_non_monotonic(self): } ) - result = df.groupby("A").rolling("4s", on="B").C.mean() expected = ( df.set_index("B").groupby("A").apply(lambda x: x.rolling("4s")["C"].mean()) ) - tm.assert_series_equal(result, expected) + with pytest.raises(ValueError, match=r".* must be monotonic"): + df.groupby("A").rolling( + "4s", on="B" + ).C.mean() # should raise for non-monotonic t series df2 = df.sort_values("B") result = df2.groupby("A").rolling("4s", on="B").C.mean()
- [x] closes #43909 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry Whats new: - Exception now raised when non-monotonic data passed to rolling groupby - Ammendment of tests to reflect this changed behaviour - Addition of test for the new behaviour
https://api.github.com/repos/pandas-dev/pandas/pulls/44068
2021-10-17T16:13:55Z
2021-12-22T15:16:03Z
2021-12-22T15:16:03Z
2021-12-22T15:16:07Z
Backport PR #43199 on branch 1.3.x (BUG: convert_dtypes incorrectly converts byte strings to strings in 1.3+)
diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst index c99f9e28e7fdf..22e15ed9f5d71 100644 --- a/doc/source/whatsnew/v1.3.4.rst +++ b/doc/source/whatsnew/v1.3.4.rst @@ -14,6 +14,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ +- Fixed regression in :meth:`DataFrame.convert_dtypes` incorrectly converts byte strings to strings (:issue:`43183`) - Fixed regression in :meth:`.GroupBy.agg` where it was failing silently with mixed data types along ``axis=1`` and :class:`MultiIndex` (:issue:`43209`) - Fixed regression in :func:`merge` with integer and ``NaN`` keys failing with ``outer`` merge (:issue:`43550`) - Fixed regression in :meth:`DataFrame.corr` raising ``ValueError`` with ``method="spearman"`` on 32-bit platforms (:issue:`43588`) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 66f835212212b..49f31ac82ff8d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1419,7 +1419,7 @@ def convert_dtypes( inferred_dtype = input_array.dtype if is_string_dtype(inferred_dtype): - if not convert_string: + if not convert_string or inferred_dtype == "bytes": return input_array.dtype else: return pandas_dtype("string") diff --git a/pandas/tests/series/methods/test_convert_dtypes.py b/pandas/tests/series/methods/test_convert_dtypes.py index 81203b944fa92..1e88ddf3cd943 100644 --- a/pandas/tests/series/methods/test_convert_dtypes.py +++ b/pandas/tests/series/methods/test_convert_dtypes.py @@ -226,3 +226,12 @@ def test_convert_bool_dtype(self): # GH32287 df = pd.DataFrame({"A": pd.array([True])}) tm.assert_frame_equal(df, df.convert_dtypes()) + + def test_convert_byte_string_dtype(self): + # GH-43183 + byte_str = b"binary-string" + + df = pd.DataFrame(data={"A": byte_str}, index=[0]) + result = df.convert_dtypes() + expected = df + tm.assert_frame_equal(result, expected)
Backport PR #43199: BUG: convert_dtypes incorrectly converts byte strings to strings in 1.3+
https://api.github.com/repos/pandas-dev/pandas/pulls/44066
2021-10-17T11:04:34Z
2021-10-17T11:53:23Z
2021-10-17T11:53:23Z
2021-10-17T11:53:23Z
BUG: sort_index did not respect ignore_index when not sorting
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index af7706f624323..3d518de98a8a3 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -500,6 +500,7 @@ Indexing - Bug in :meth:`DataFrame.nlargest` and :meth:`Series.nlargest` where sorted result did not count indexes containing ``np.nan`` (:issue:`28984`) - Bug in indexing on a non-unique object-dtype :class:`Index` with an NA scalar (e.g. ``np.nan``) (:issue:`43711`) - Bug in :meth:`Series.__setitem__` with object dtype when setting an array with matching size and dtype='datetime64[ns]' or dtype='timedelta64[ns]' incorrectly converting the datetime/timedeltas to integers (:issue:`43868`) +- Bug in :meth:`DataFrame.sort_index` where ``ignore_index=True`` was not being respected when the index was already sorted (:issue:`43591`) - Bug in :meth:`Index.get_indexer_non_unique` when index contains multiple ``np.datetime64("NaT")`` and ``np.timedelta64("NaT")`` (:issue:`43869`) - diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 71450a1e63bf3..c3ad87082c8ed 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4633,10 +4633,17 @@ def sort_index( ) if indexer is None: + if inplace: + result = self + else: + result = self.copy() + + if ignore_index: + result.index = default_index(len(self)) if inplace: return else: - return self.copy() + return result baxis = self._get_block_manager_axis(axis) new_data = self._mgr.take(indexer, axis=baxis, verify=False) diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index c1141f705acbc..71822628473f4 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -9,6 +9,7 @@ Index, IntervalIndex, MultiIndex, + RangeIndex, Series, Timestamp, ) @@ -418,6 +419,24 @@ def test_sort_index_ignore_index( tm.assert_frame_equal(result_df, expected_df) tm.assert_frame_equal(df, 
DataFrame(original_dict, index=original_index)) + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize("ignore_index", [True, False]) + def test_respect_ignore_index(self, inplace, ignore_index): + # GH 43591 + df = DataFrame({"a": [1, 2, 3]}, index=RangeIndex(4, -1, -2)) + result = df.sort_index( + ascending=False, ignore_index=ignore_index, inplace=inplace + ) + + if inplace: + result = df + if ignore_index: + expected = DataFrame({"a": [1, 2, 3]}) + else: + expected = DataFrame({"a": [1, 2, 3]}, index=RangeIndex(4, -1, -2)) + + tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize("inplace", [True, False]) @pytest.mark.parametrize( "original_dict, sorted_dict, ascending, ignore_index, output_index",
- [X] closes #43591 - [X] tests added / passed - [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44065
2021-10-17T02:58:03Z
2021-10-19T15:00:17Z
2021-10-19T15:00:16Z
2021-10-19T15:00:25Z
DEPS: bump pyarrow min to 1.0 #41329
diff --git a/ci/deps/actions-38-db-min.yaml b/ci/deps/actions-38-db-min.yaml index a45e3919afd69..f875f2ef88949 100644 --- a/ci/deps/actions-38-db-min.yaml +++ b/ci/deps/actions-38-db-min.yaml @@ -31,7 +31,7 @@ dependencies: - openpyxl - pandas-gbq - protobuf>=3.12.4 - - pyarrow=0.17.1 # GH 38803 + - pyarrow=1.0.1 - pytables>=3.6.1 - scipy - xarray=0.15.1 diff --git a/ci/deps/actions-38-db.yaml b/ci/deps/actions-38-db.yaml index a143de20c2207..3e959f9b7e992 100644 --- a/ci/deps/actions-38-db.yaml +++ b/ci/deps/actions-38-db.yaml @@ -30,7 +30,7 @@ dependencies: - openpyxl - pandas-gbq - psycopg2 - - pyarrow>=0.17.0 + - pyarrow>=1.0.1 - pymysql - pytables - python-snappy diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml index cdcb598ccc566..cc1fd022ad24c 100644 --- a/ci/deps/actions-38-minimum_versions.yaml +++ b/ci/deps/actions-38-minimum_versions.yaml @@ -23,7 +23,7 @@ dependencies: - pytables=3.6.1 - python-dateutil=2.8.1 - pytz=2020.1 - - pyarrow=0.17.0 + - pyarrow=1.0.1 - scipy=1.4.1 - xlrd=2.0.1 - xlsxwriter=1.2.2 diff --git a/ci/deps/azure-macos-38.yaml b/ci/deps/azure-macos-38.yaml index 27bfb136005c1..fe6fa6ca37e01 100644 --- a/ci/deps/azure-macos-38.yaml +++ b/ci/deps/azure-macos-38.yaml @@ -22,7 +22,7 @@ dependencies: - numexpr - numpy=1.18.5 - openpyxl - - pyarrow=0.17 + - pyarrow=1.0.1 - pytables - python-dateutil==2.8.1 - pytz diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index 3e462a7cb3a65..d4e2c482d1c1c 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -25,7 +25,7 @@ dependencies: - numpy=1.18 - openpyxl - jinja2 - - pyarrow=0.17.0 + - pyarrow=1.0.1 - pytables - python-dateutil - pytz diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 20ae37c85a9d9..da70549687594 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -360,7 +360,7 @@ PyTables 3.6.1 
HDF5-based reading / writing blosc 1.20.1 Compression for HDF5 zlib Compression for HDF5 fastparquet 0.4.0 Parquet reading / writing -pyarrow 0.17.0 Parquet, ORC, and feather reading / writing +pyarrow 1.0.1 Parquet, ORC, and feather reading / writing pyreadstat SPSS files (.sav) reading ========================= ================== ============================================================= diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index c69f17512a8ea..c2ca3df5ca23d 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5271,15 +5271,6 @@ Several caveats: See the `Full Documentation <https://github.com/wesm/feather>`__. -.. ipython:: python - :suppress: - - import warnings - - # This can be removed once building with pyarrow >=0.15.0 - warnings.filterwarnings("ignore", "The Sparse", FutureWarning) - - .. ipython:: python df = pd.DataFrame( diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 92daa94cde1a5..16ee728a4425a 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -298,7 +298,7 @@ Optional libraries below the lowest tested version may still work, but are not c +-----------------+-----------------+---------+ | openpyxl | 3.0.2 | X | +-----------------+-----------------+---------+ -| pyarrow | 0.17.0 | | +| pyarrow | 1.0.1 | X | +-----------------+-----------------+---------+ | pymysql | 0.10.1 | X | +-----------------+-----------------+---------+ diff --git a/environment.yml b/environment.yml index 1cd1179fc29b0..f5f495bed4d78 100644 --- a/environment.yml +++ b/environment.yml @@ -100,7 +100,7 @@ dependencies: - odfpy - fastparquet>=0.4.0 # pandas.read_parquet, DataFrame.to_parquet - - pyarrow>=0.17.0 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather + - pyarrow>=1.0.1 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather - python-snappy # required by pyarrow - 
pytables>=3.6.1 # pandas.read_hdf, DataFrame.to_hdf diff --git a/requirements-dev.txt b/requirements-dev.txt index 14b9ac8a9393e..b384d3b6af5b8 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -65,7 +65,7 @@ xlsxwriter xlwt odfpy fastparquet>=0.4.0 -pyarrow>=0.17.0 +pyarrow>=1.0.1 python-snappy tables>=3.6.1 s3fs>=0.4.0
- [x] closes #41329 - [ ] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44064
2021-10-17T01:41:04Z
2021-10-18T13:29:03Z
2021-10-18T13:29:02Z
2022-11-18T02:20:52Z
CLN: collected cleanups
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 9fa84a0135a5e..78853ce6e41dc 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -408,7 +408,7 @@ cdef slice indexer_as_slice(intp_t[:] vals): int64_t d if vals is None: - raise TypeError("vals must be ndarray") + raise TypeError("vals must be ndarray") # pragma: no cover n = vals.shape[0] @@ -772,7 +772,7 @@ cdef class BlockManager: self.blocks = blocks self.axes = axes - else: + else: # pragma: no cover raise NotImplementedError("pre-0.14.1 pickles are no longer supported") self._post_setstate() diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index c9a4b49f90037..dc3bb09c1b462 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -952,12 +952,11 @@ def asof_join_nearest(numeric_t[:] left_values, tolerance=None): cdef: - Py_ssize_t left_size, right_size, i + Py_ssize_t left_size, i ndarray[intp_t] left_indexer, right_indexer, bli, bri, fli, fri numeric_t bdiff, fdiff left_size = len(left_values) - right_size = len(right_values) left_indexer = np.empty(left_size, dtype=np.intp) right_indexer = np.empty(left_size, dtype=np.intp) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 5557882e7e9b9..e8248eeb07395 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -808,9 +808,7 @@ cdef class Tick(SingleConstructorOffset): def nanos(self) -> int64_t: return self.n * self._nanos_inc - # FIXME: This should be typed as datetime, but we DatetimeLikeIndex.insert - # checks self.freq.is_on_offset with a Timedelta sometimes. 
- def is_on_offset(self, dt) -> bool: + def is_on_offset(self, dt: datetime) -> bool: return True def is_anchored(self) -> bool: diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index b5c68fb7ada54..791c1110e3cd2 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -670,7 +670,11 @@ def _get_insert_freq(self, loc: int, item): freq = self.freq else: # Adding a single item to an empty index may preserve freq - if self.freq.is_on_offset(item): + if isinstance(self.freq, Tick): + # all TimedeltaIndex cases go through here; is_on_offset + # would raise TypeError + freq = self.freq + elif self.freq.is_on_offset(item): freq = self.freq return freq diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 268375864b1df..c1fd29615e1bc 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -155,9 +155,6 @@ def arrays_to_mgr( arrays, axes, consolidate=consolidate ) elif typ == "array": - if len(columns) != len(arrays): - assert len(arrays) == 0 - arrays = [np.array([], dtype=object) for _ in range(len(columns))] return ArrayManager(arrays, [index, columns]) else: raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index a8348b0c5773f..7813182222d67 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -96,7 +96,9 @@ def get_indexer_indexer( return indexer -def get_group_index(labels, shape: Shape, sort: bool, xnull: bool): +def get_group_index( + labels, shape: Shape, sort: bool, xnull: bool +) -> npt.NDArray[np.int64]: """ For the particular label_list, gets the offsets into the hypothetical list representing the totally ordered cartesian product of all possible label @@ -651,7 +653,7 @@ def get_group_index_sorter( def compress_group_index( - group_index: np.ndarray, sort: bool = True + group_index: 
npt.NDArray[np.int64], sort: bool = True ) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: """ Group_index is offsets into cartesian product of all possible labels. This diff --git a/pandas/tests/base/test_fillna.py b/pandas/tests/base/test_fillna.py index c6f58af4c5c3a..32c9d288e665d 100644 --- a/pandas/tests/base/test_fillna.py +++ b/pandas/tests/base/test_fillna.py @@ -6,9 +6,6 @@ import numpy as np import pytest -from pandas._libs import iNaT - -from pandas.core.dtypes.common import needs_i8_conversion from pandas.core.dtypes.generic import ABCMultiIndex from pandas import Index @@ -47,24 +44,17 @@ def test_fillna_null(null_obj, index_or_series_obj): elif isinstance(obj, ABCMultiIndex): pytest.skip(f"MultiIndex can't hold '{null_obj}'") - values = obj.values + values = obj._values fill_value = values[0] expected = values.copy() - if needs_i8_conversion(obj.dtype): - values[0:2] = iNaT - expected[0:2] = fill_value - else: - values[0:2] = null_obj - expected[0:2] = fill_value + values[0:2] = null_obj + expected[0:2] = fill_value expected = klass(expected) obj = klass(values) result = obj.fillna(fill_value) - if isinstance(obj, Index): - tm.assert_index_equal(result, expected) - else: - tm.assert_series_equal(result, expected) + tm.assert_equal(result, expected) # check shallow_copied assert obj is not result diff --git a/pandas/tests/base/test_unique.py b/pandas/tests/base/test_unique.py index 1f6b0f1db55d6..95e07583bab66 100644 --- a/pandas/tests/base/test_unique.py +++ b/pandas/tests/base/test_unique.py @@ -96,11 +96,8 @@ def test_nunique_null(null_obj, index_or_series_obj): elif isinstance(obj, pd.MultiIndex): pytest.skip(f"MultiIndex can't hold '{null_obj}'") - values = obj.values - if needs_i8_conversion(obj.dtype): - values[0:2] = iNaT - else: - values[0:2] = null_obj + values = obj._values + values[0:2] = null_obj klass = type(obj) repeated_values = np.repeat(values, range(1, len(values) + 1)) diff --git a/pandas/tests/base/test_value_counts.py 
b/pandas/tests/base/test_value_counts.py index 10f391a49d98f..5431baf493260 100644 --- a/pandas/tests/base/test_value_counts.py +++ b/pandas/tests/base/test_value_counts.py @@ -5,11 +5,8 @@ import numpy as np import pytest -from pandas._libs import iNaT from pandas.compat import np_array_datetime64_compat -from pandas.core.dtypes.common import needs_i8_conversion - import pandas as pd from pandas import ( DatetimeIndex, @@ -54,11 +51,8 @@ def test_value_counts_null(null_obj, index_or_series_obj): elif isinstance(orig, pd.MultiIndex): pytest.skip(f"MultiIndex can't hold '{null_obj}'") - values = obj.values - if needs_i8_conversion(obj.dtype): - values[0:2] = iNaT - else: - values[0:2] = null_obj + values = obj._values + values[0:2] = null_obj klass = type(obj) repeated_values = np.repeat(values, range(1, len(values) + 1)) diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 1c3739d9aebb8..7566c17eda9e6 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -6,7 +6,6 @@ import numpy as np import pytest -from pandas._libs import iNaT from pandas._libs.tslibs import Timestamp from pandas.core.dtypes.common import ( @@ -37,7 +36,6 @@ Int64Index, UInt64Index, ) -from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin class Base: @@ -548,17 +546,11 @@ def test_fillna(self, index): idx.fillna([idx[0]]) idx = index.copy(deep=True) - values = np.asarray(idx.values) + values = idx._values - if isinstance(index, DatetimeIndexOpsMixin): - values[1] = iNaT - else: - values[1] = np.nan + values[1] = np.nan - if isinstance(index, PeriodIndex): - idx = type(index)(values, freq=index.freq) - else: - idx = type(index)(values) + idx = type(index)(values) expected = np.array([False] * len(idx), dtype=bool) expected[1] = True diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index 604b68cfcc791..ed9243a5ba8d0 100644 --- a/pandas/tests/indexes/test_common.py +++ 
b/pandas/tests/indexes/test_common.py @@ -8,12 +8,10 @@ import numpy as np import pytest -from pandas._libs.tslibs import iNaT from pandas.compat import IS64 from pandas.core.dtypes.common import ( is_integer_dtype, - is_period_dtype, needs_i8_conversion, ) @@ -173,21 +171,10 @@ def test_unique(self, index_flat): if not index._can_hold_na: pytest.skip("Skip na-check if index cannot hold na") - if is_period_dtype(index.dtype): - vals = index[[0] * 5]._data - vals[0] = pd.NaT - elif needs_i8_conversion(index.dtype): - vals = index._data._ndarray[[0] * 5] - vals[0] = iNaT - else: - vals = index.values[[0] * 5] - vals[0] = np.nan + vals = index._values[[0] * 5] + vals[0] = np.nan vals_unique = vals[:2] - if index.dtype.kind in ["m", "M"]: - # i.e. needs_i8_conversion but not period_dtype, as above - vals = type(index._data)(vals, dtype=index.dtype) - vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype) idx_nan = index._shallow_copy(vals) idx_unique_nan = index._shallow_copy(vals_unique) assert idx_unique_nan.is_unique is True @@ -378,26 +365,21 @@ def test_hasnans_isnans(self, index_flat): assert idx.hasnans is False idx = index.copy(deep=True) - values = np.asarray(idx.values) + values = idx._values if len(index) == 0: return elif isinstance(index, NumericIndex) and is_integer_dtype(index.dtype): return - elif needs_i8_conversion(index.dtype): - values[1] = iNaT - else: - values[1] = np.nan - if isinstance(index, PeriodIndex): - idx = type(index)(values, freq=index.freq) - else: - idx = type(index)(values) + values[1] = np.nan - expected = np.array([False] * len(idx), dtype=bool) - expected[1] = True - tm.assert_numpy_array_equal(idx._isnan, expected) - assert idx.hasnans is True + idx = type(index)(values) + + expected = np.array([False] * len(idx), dtype=bool) + expected[1] = True + tm.assert_numpy_array_equal(idx._isnan, expected) + assert idx.hasnans is True @pytest.mark.parametrize("na_position", [None, "middle"])
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44063
2021-10-16T22:48:35Z
2021-10-18T01:32:58Z
2021-10-18T01:32:58Z
2021-10-18T01:36:22Z
Backport PR #43291: BUG: Fixes to FixedForwardWindowIndexer and GroupbyIndexer (#43267)
diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst index b6ee2d57a0965..f36c0afac763e 100644 --- a/doc/source/whatsnew/v1.3.4.rst +++ b/doc/source/whatsnew/v1.3.4.rst @@ -33,6 +33,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ +- Fixed bug in :meth:`pandas.DataFrame.groupby.rolling` and :class:`pandas.api.indexers.FixedForwardWindowIndexer` leading to segfaults and window endpoints being mixed across groups (:issue:`43267`) - Fixed bug in :meth:`.GroupBy.mean` with datetimelike values including ``NaT`` values returning incorrect results (:issue:`43132`) - Fixed bug in :meth:`Series.aggregate` not passing the first ``args`` to the user supplied ``func`` in certain cases (:issue:`43357`) - Fixed memory leaks in :meth:`Series.rolling.quantile` and :meth:`Series.rolling.median` (:issue:`43339`) diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 04c81ae756855..40522111cbf41 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -699,7 +699,7 @@ def _get_window_indexer(self) -> GroupbyIndexer: GroupbyIndexer """ window_indexer = GroupbyIndexer( - groupby_indicies=self._grouper.indices, + groupby_indices=self._grouper.indices, window_indexer=ExponentialMovingWindowIndexer, ) return window_indexer diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index 02cf31cad7b8d..c03f4653e5b1c 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -680,7 +680,7 @@ def _get_window_indexer(self) -> GroupbyIndexer: GroupbyIndexer """ window_indexer = GroupbyIndexer( - groupby_indicies=self._grouper.indices, + groupby_indices=self._grouper.indices, window_indexer=ExpandingIndexer, ) return window_indexer diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index cef023a647d7f..c4156f214ca68 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -266,9 +266,9 @@ def get_window_bounds( ) start = np.arange(num_values, 
dtype="int64") - end_s = start[: -self.window_size] + self.window_size - end_e = np.full(self.window_size, num_values, dtype="int64") - end = np.concatenate([end_s, end_e]) + end = start + self.window_size + if self.window_size: + end[-self.window_size :] = num_values return start, end @@ -279,8 +279,8 @@ class GroupbyIndexer(BaseIndexer): def __init__( self, index_array: np.ndarray | None = None, - window_size: int = 0, - groupby_indicies: dict | None = None, + window_size: int | BaseIndexer = 0, + groupby_indices: dict | None = None, window_indexer: type[BaseIndexer] = BaseIndexer, indexer_kwargs: dict | None = None, **kwargs, @@ -292,9 +292,9 @@ def __init__( np.ndarray of the index of the original object that we are performing a chained groupby operation over. This index has been pre-sorted relative to the groups - window_size : int + window_size : int or BaseIndexer window size during the windowing operation - groupby_indicies : dict or None + groupby_indices : dict or None dict of {group label: [positional index of rows belonging to the group]} window_indexer : BaseIndexer BaseIndexer class determining the start and end bounds of each group @@ -303,11 +303,13 @@ def __init__( **kwargs : keyword arguments that will be available when get_window_bounds is called """ - self.groupby_indicies = groupby_indicies or {} + self.groupby_indices = groupby_indices or {} self.window_indexer = window_indexer - self.indexer_kwargs = indexer_kwargs or {} + self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {} super().__init__( - index_array, self.indexer_kwargs.pop("window_size", window_size), **kwargs + index_array=index_array, + window_size=self.indexer_kwargs.pop("window_size", window_size), + **kwargs, ) @Appender(get_window_bounds_doc) @@ -323,8 +325,8 @@ def get_window_bounds( # 3) Append the window bounds in group order start_arrays = [] end_arrays = [] - window_indicies_start = 0 - for key, indices in self.groupby_indicies.items(): + 
window_indices_start = 0 + for key, indices in self.groupby_indices.items(): index_array: np.ndarray | None if self.index_array is not None: @@ -341,18 +343,21 @@ def get_window_bounds( ) start = start.astype(np.int64) end = end.astype(np.int64) - # Cannot use groupby_indicies as they might not be monotonic with the object + assert len(start) == len( + end + ), "these should be equal in length from get_window_bounds" + # Cannot use groupby_indices as they might not be monotonic with the object # we're rolling over - window_indicies = np.arange( - window_indicies_start, window_indicies_start + len(indices) + window_indices = np.arange( + window_indices_start, window_indices_start + len(indices) ) - window_indicies_start += len(indices) + window_indices_start += len(indices) # Extend as we'll be slicing window like [start, end) - window_indicies = np.append( - window_indicies, [window_indicies[-1] + 1] - ).astype(np.int64) - start_arrays.append(window_indicies.take(ensure_platform_int(start))) - end_arrays.append(window_indicies.take(ensure_platform_int(end))) + window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype( + np.int64, copy=False + ) + start_arrays.append(window_indices.take(ensure_platform_int(start))) + end_arrays.append(window_indices.take(ensure_platform_int(end))) start = np.concatenate(start_arrays) end = np.concatenate(end_arrays) return start, end diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 1c714db78fa46..14246ec30ffa7 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -300,8 +300,10 @@ def __iter__(self): center=self.center, closed=self.closed, ) - # From get_window_bounds, those two should be equal in length of array - assert len(start) == len(end) + + assert len(start) == len( + end + ), "these should be equal in length from get_window_bounds" for s, e in zip(start, end): result = obj.iloc[slice(s, e)] @@ -522,6 +524,10 @@ def calc(x): center=self.center, 
closed=self.closed, ) + assert len(start) == len( + end + ), "these should be equal in length from get_window_bounds" + return func(x, start, end, min_periods) with np.errstate(all="ignore"): @@ -1402,6 +1408,11 @@ def cov_func(x, y): center=self.center, closed=self.closed, ) + + assert len(start) == len( + end + ), "these should be equal in length from get_window_bounds" + with np.errstate(all="ignore"): mean_x_y = window_aggregations.roll_mean( x_array * y_array, start, end, min_periods @@ -1441,6 +1452,11 @@ def corr_func(x, y): center=self.center, closed=self.closed, ) + + assert len(start) == len( + end + ), "these should be equal in length from get_window_bounds" + with np.errstate(all="ignore"): mean_x_y = window_aggregations.roll_mean( x_array * y_array, start, end, min_periods @@ -2316,11 +2332,11 @@ def _get_window_indexer(self) -> GroupbyIndexer: index_array = self._index_array if isinstance(self.window, BaseIndexer): rolling_indexer = type(self.window) - indexer_kwargs = self.window.__dict__ + indexer_kwargs = self.window.__dict__.copy() assert isinstance(indexer_kwargs, dict) # for mypy # We'll be using the index of each group later indexer_kwargs.pop("index_array", None) - window = 0 + window = self.window elif self._win_freq_i8 is not None: rolling_indexer = VariableWindowIndexer window = self._win_freq_i8 @@ -2330,7 +2346,7 @@ def _get_window_indexer(self) -> GroupbyIndexer: window_indexer = GroupbyIndexer( index_array=index_array, window_size=window, - groupby_indicies=self._grouper.indices, + groupby_indices=self._grouper.indices, window_indexer=rolling_indexer, indexer_kwargs=indexer_kwargs, ) diff --git a/pandas/tests/groupby/test_missing.py b/pandas/tests/groupby/test_missing.py index f3149abb52291..2391ef620aea6 100644 --- a/pandas/tests/groupby/test_missing.py +++ b/pandas/tests/groupby/test_missing.py @@ -144,7 +144,7 @@ def test_min_count(func, min_count, value): tm.assert_frame_equal(result, expected) -def test_indicies_with_missing(): 
+def test_indices_with_missing(): # GH 9304 df = DataFrame({"a": [1, 1, np.nan], "b": [2, 3, 4], "c": [5, 6, 7]}) g = df.groupby(["a", "b"]) diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py index 06867e80ee711..5a3bfbb5741c3 100644 --- a/pandas/tests/window/test_base_indexer.py +++ b/pandas/tests/window/test_base_indexer.py @@ -3,7 +3,9 @@ from pandas import ( DataFrame, + MultiIndex, Series, + concat, date_range, ) import pandas._testing as tm @@ -13,6 +15,7 @@ ) from pandas.core.window.indexers import ( ExpandingIndexer, + FixedWindowIndexer, VariableOffsetWindowIndexer, ) @@ -293,3 +296,159 @@ def get_window_bounds(self, num_values, min_periods, center, closed): result = getattr(df.rolling(indexer), func)(*args) expected = DataFrame({"values": values}) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "indexer_class", [FixedWindowIndexer, FixedForwardWindowIndexer, ExpandingIndexer] +) +@pytest.mark.parametrize("window_size", [1, 2, 12]) +@pytest.mark.parametrize( + "df_data", + [ + {"a": [1, 1], "b": [0, 1]}, + {"a": [1, 2], "b": [0, 1]}, + {"a": [1] * 16, "b": [np.nan, 1, 2, np.nan] + list(range(4, 16))}, + ], +) +def test_indexers_are_reusable_after_groupby_rolling( + indexer_class, window_size, df_data +): + # GH 43267 + df = DataFrame(df_data) + num_trials = 3 + indexer = indexer_class(window_size=window_size) + original_window_size = indexer.window_size + for i in range(num_trials): + df.groupby("a")["b"].rolling(window=indexer, min_periods=1).mean() + assert indexer.window_size == original_window_size + + +@pytest.mark.parametrize( + "window_size, num_values, expected_start, expected_end", + [ + (1, 1, [0], [1]), + (1, 2, [0, 1], [1, 2]), + (2, 1, [0], [1]), + (2, 2, [0, 1], [2, 2]), + (5, 12, range(12), list(range(5, 12)) + [12] * 5), + (12, 5, range(5), [5] * 5), + (0, 0, np.array([]), np.array([])), + (1, 0, np.array([]), np.array([])), + (0, 1, [0], [0]), + ], +) +def 
test_fixed_forward_indexer_bounds( + window_size, num_values, expected_start, expected_end +): + # GH 43267 + indexer = FixedForwardWindowIndexer(window_size=window_size) + start, end = indexer.get_window_bounds(num_values=num_values) + + tm.assert_numpy_array_equal(start, np.array(expected_start), check_dtype=False) + tm.assert_numpy_array_equal(end, np.array(expected_end), check_dtype=False) + assert len(start) == len(end) + + +@pytest.mark.parametrize( + "df, window_size, expected", + [ + ( + DataFrame({"b": [0, 1, 2], "a": [1, 2, 2]}), + 2, + Series( + [0, 1.5, 2.0], + index=MultiIndex.from_arrays([[1, 2, 2], range(3)], names=["a", None]), + name="b", + dtype=np.float64, + ), + ), + ( + DataFrame( + { + "b": [np.nan, 1, 2, np.nan] + list(range(4, 18)), + "a": [1] * 7 + [2] * 11, + "c": range(18), + } + ), + 12, + Series( + [ + 3.6, + 3.6, + 4.25, + 5.0, + 5.0, + 5.5, + 6.0, + 12.0, + 12.5, + 13.0, + 13.5, + 14.0, + 14.5, + 15.0, + 15.5, + 16.0, + 16.5, + 17.0, + ], + index=MultiIndex.from_arrays( + [[1] * 7 + [2] * 11, range(18)], names=["a", None] + ), + name="b", + dtype=np.float64, + ), + ), + ], +) +def test_rolling_groupby_with_fixed_forward_specific(df, window_size, expected): + # GH 43267 + indexer = FixedForwardWindowIndexer(window_size=window_size) + result = df.groupby("a")["b"].rolling(window=indexer, min_periods=1).mean() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "group_keys", + [ + (1,), + (1, 2), + (2, 1), + (1, 1, 2), + (1, 2, 1), + (1, 1, 2, 2), + (1, 2, 3, 2, 3), + (1, 1, 2) * 4, + (1, 2, 3) * 5, + ], +) +@pytest.mark.parametrize("window_size", [1, 2, 3, 4, 5, 8, 20]) +def test_rolling_groupby_with_fixed_forward_many(group_keys, window_size): + # GH 43267 + df = DataFrame( + { + "a": np.array(list(group_keys)), + "b": np.arange(len(group_keys), dtype=np.float64) + 17, + "c": np.arange(len(group_keys), dtype=np.int64), + } + ) + + indexer = FixedForwardWindowIndexer(window_size=window_size) + result = 
df.groupby("a")["b"].rolling(window=indexer, min_periods=1).sum() + result.index.names = ["a", "c"] + + groups = df.groupby("a")[["a", "b"]] + manual = concat( + [ + g.assign( + b=[ + g["b"].iloc[i : i + window_size].sum(min_count=1) + for i in range(len(g)) + ] + ) + for _, g in groups + ] + ) + manual = manual.set_index(["a", "c"])["b"] + + tm.assert_series_equal(result, manual)
Backport PR #43291
https://api.github.com/repos/pandas-dev/pandas/pulls/44061
2021-10-16T20:36:14Z
2021-10-16T21:37:12Z
2021-10-16T21:37:12Z
2021-10-16T21:37:16Z
Backport PR #44057 on branch 1.3.x (DOC: 1.3.4 release date)
diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst index b6ee2d57a0965..2bdebe463ecc0 100644 --- a/doc/source/whatsnew/v1.3.4.rst +++ b/doc/source/whatsnew/v1.3.4.rst @@ -1,6 +1,6 @@ .. _whatsnew_134: -What's new in 1.3.4 (October ??, 2021) +What's new in 1.3.4 (October 17, 2021) -------------------------------------- These are the changes in pandas 1.3.4. See :ref:`release` for a full changelog @@ -44,7 +44,6 @@ Bug fixes Other ~~~~~ - The minimum version of Cython needed to compile pandas is now ``0.29.24`` (:issue:`43729`) -- .. ---------------------------------------------------------------------------
Backport PR #44057: DOC: 1.3.4 release date
https://api.github.com/repos/pandas-dev/pandas/pulls/44060
2021-10-16T19:56:58Z
2021-10-16T21:02:59Z
2021-10-16T21:02:59Z
2021-10-16T21:02:59Z
REF: share ExtensionIndex astype, __getitem__ with Index
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 0cfd0e970a44c..bea31d148a309 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -59,7 +59,10 @@ deprecate_nonkeyword_arguments, doc, ) -from pandas.util._exceptions import find_stack_level +from pandas.util._exceptions import ( + find_stack_level, + rewrite_exception, +) from pandas.core.dtypes.cast import ( can_hold_element, @@ -985,20 +988,40 @@ def astype(self, dtype, copy=True): dtype = pandas_dtype(dtype) if is_dtype_equal(self.dtype, dtype): + # Ensure that self.astype(self.dtype) is self return self.copy() if copy else self + if ( + self.dtype == np.dtype("M8[ns]") + and isinstance(dtype, np.dtype) + and dtype.kind == "M" + and dtype != np.dtype("M8[ns]") + ): + # For now DatetimeArray supports this by unwrapping ndarray, + # but DatetimeIndex doesn't + raise TypeError(f"Cannot cast {type(self).__name__} to dtype") + + values = self._data + if isinstance(values, ExtensionArray): + with rewrite_exception(type(values).__name__, type(self).__name__): + new_values = values.astype(dtype, copy=copy) + elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() - new_values = cls._from_sequence(self, dtype=dtype, copy=False) - return Index(new_values, dtype=dtype, copy=copy, name=self.name) + # Note: for RangeIndex and CategoricalDtype self vs self._values + # behaves differently here. 
+ new_values = cls._from_sequence(self, dtype=dtype, copy=copy) - try: - casted = self._values.astype(dtype, copy=copy) - except (TypeError, ValueError) as err: - raise TypeError( - f"Cannot cast {type(self).__name__} to dtype {dtype}" - ) from err - return Index(casted, name=self.name, dtype=dtype) + else: + try: + new_values = values.astype(dtype, copy=copy) + except (TypeError, ValueError) as err: + raise TypeError( + f"Cannot cast {type(self).__name__} to dtype {dtype}" + ) from err + + # pass copy=False because any copying will be done in the astype above + return Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) _index_shared_docs[ "take" @@ -4870,8 +4893,6 @@ def __getitem__(self, key): corresponding `Index` subclass. """ - # There's no custom logic to be implemented in __getslice__, so it's - # not overloaded intentionally. getitem = self._data.__getitem__ if is_scalar(key): @@ -4880,25 +4901,32 @@ def __getitem__(self, key): if isinstance(key, slice): # This case is separated from the conditional above to avoid - # pessimization of basic indexing. + # pessimization com.is_bool_indexer and ndim checks. result = getitem(key) # Going through simple_new for performance. 
return type(self)._simple_new(result, name=self._name) if com.is_bool_indexer(key): + # if we have list[bools, length=1e5] then doing this check+convert + # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ + # time below from 3.8 ms to 496 µs + # if we already have ndarray[bool], the overhead is 1.4 µs or .25% key = np.asarray(key, dtype=bool) result = getitem(key) - if not is_scalar(result): - if np.ndim(result) > 1: - deprecate_ndim_indexing(result) - return result - # NB: Using _constructor._simple_new would break if MultiIndex - # didn't override __getitem__ - return self._constructor._simple_new(result, name=self._name) - else: + # Because we ruled out integer above, we always get an arraylike here + if result.ndim > 1: + deprecate_ndim_indexing(result) + if hasattr(result, "_ndarray"): + # i.e. NDArrayBackedExtensionArray + # Unpack to ndarray for MPL compat + return result._ndarray return result + # NB: Using _constructor._simple_new would break if MultiIndex + # didn't override __getitem__ + return self._constructor._simple_new(result, name=self._name) + def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT: """ Fastpath for __getitem__ when we know we have a slice. 
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index b66c68ccae97b..ccd18f54da327 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -15,17 +15,11 @@ cache_readonly, doc, ) -from pandas.util._exceptions import rewrite_exception -from pandas.core.dtypes.common import ( - is_dtype_equal, - pandas_dtype, -) from pandas.core.dtypes.generic import ABCDataFrame from pandas.core.arrays import IntervalArray from pandas.core.arrays._mixins import NDArrayBackedExtensionArray -from pandas.core.indexers import deprecate_ndim_indexing from pandas.core.indexes.base import Index _T = TypeVar("_T", bound="NDArrayBackedExtensionIndex") @@ -138,22 +132,6 @@ class ExtensionIndex(Index): _data: IntervalArray | NDArrayBackedExtensionArray - # --------------------------------------------------------------------- - # NDarray-Like Methods - - def __getitem__(self, key): - result = self._data[key] - if isinstance(result, type(self._data)): - if result.ndim == 1: - return type(self)(result, name=self._name) - # Unpack to ndarray for MPL compat - - result = result._ndarray - - # Includes cases where we get a 2D ndarray back for MPL compat - deprecate_ndim_indexing(result) - return result - # --------------------------------------------------------------------- def insert(self, loc: int, item) -> Index: @@ -204,33 +182,6 @@ def map(self, mapper, na_action=None): except Exception: return self.astype(object).map(mapper) - @doc(Index.astype) - def astype(self, dtype, copy: bool = True) -> Index: - dtype = pandas_dtype(dtype) - if is_dtype_equal(self.dtype, dtype): - if not copy: - # Ensure that self.astype(self.dtype) is self - return self - return self.copy() - - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Literal['M8[ns]']") - if ( - isinstance(self.dtype, np.dtype) - and isinstance(dtype, np.dtype) - and dtype.kind == "M" - and dtype != "M8[ns]" # type: 
ignore[comparison-overlap] - ): - # For now Datetime supports this by unwrapping ndarray, but DTI doesn't - raise TypeError(f"Cannot cast {type(self).__name__} to dtype") - - with rewrite_exception(type(self._data).__name__, type(self).__name__): - new_values = self._data.astype(dtype, copy=copy) - - # pass copy=False because any copying will be done in the - # _data.astype call above - return Index(new_values, dtype=new_values.dtype, name=self.name, copy=False) - @cache_readonly def _isnan(self) -> npt.NDArray[np.bool_]: # error: Incompatible return value type (got "ExtensionArray", expected
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44059
2021-10-16T18:22:30Z
2021-10-17T22:42:15Z
2021-10-17T22:42:15Z
2021-10-17T23:55:38Z
DOC: Start v1.3.5 release notes
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index b94954cf4c361..df33174804a33 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -24,6 +24,7 @@ Version 1.3 .. toctree:: :maxdepth: 2 + v1.3.5 v1.3.4 v1.3.3 v1.3.2 diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst index b6ee2d57a0965..a8e11bc2817fc 100644 --- a/doc/source/whatsnew/v1.3.4.rst +++ b/doc/source/whatsnew/v1.3.4.rst @@ -53,4 +53,4 @@ Other Contributors ~~~~~~~~~~~~ -.. contributors:: v1.3.3..v1.3.4|HEAD +.. contributors:: v1.3.3..v1.3.4 diff --git a/doc/source/whatsnew/v1.3.5.rst b/doc/source/whatsnew/v1.3.5.rst new file mode 100644 index 0000000000000..0f1997de2166a --- /dev/null +++ b/doc/source/whatsnew/v1.3.5.rst @@ -0,0 +1,45 @@ +.. _whatsnew_135: + +What's new in 1.3.5 (November ??, 2021) +--------------------------------------- + +These are the changes in pandas 1.3.5. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_135.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_135.bug_fixes: + +Bug fixes +~~~~~~~~~ +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_135.other: + +Other +~~~~~ +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_135.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.3.4..v1.3.5|HEAD
this fails until 1.3.4 tag exists.
https://api.github.com/repos/pandas-dev/pandas/pulls/44058
2021-10-16T18:15:08Z
2021-10-17T17:57:06Z
2021-10-17T17:57:06Z
2021-10-17T17:57:10Z
DOC: 1.3.4 release date
diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst index b6ee2d57a0965..2bdebe463ecc0 100644 --- a/doc/source/whatsnew/v1.3.4.rst +++ b/doc/source/whatsnew/v1.3.4.rst @@ -1,6 +1,6 @@ .. _whatsnew_134: -What's new in 1.3.4 (October ??, 2021) +What's new in 1.3.4 (October 17, 2021) -------------------------------------- These are the changes in pandas 1.3.4. See :ref:`release` for a full changelog @@ -44,7 +44,6 @@ Bug fixes Other ~~~~~ - The minimum version of Cython needed to compile pandas is now ``0.29.24`` (:issue:`43729`) -- .. ---------------------------------------------------------------------------
null
https://api.github.com/repos/pandas-dev/pandas/pulls/44057
2021-10-16T18:07:25Z
2021-10-16T19:56:49Z
2021-10-16T19:56:49Z
2021-10-16T20:06:02Z
Backport PR #44032 on branch 1.3.x ([PERF] fixing memory leak in aggregation.pyx)
diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst index 9c9aacc4b0f52..b6ee2d57a0965 100644 --- a/doc/source/whatsnew/v1.3.4.rst +++ b/doc/source/whatsnew/v1.3.4.rst @@ -35,6 +35,7 @@ Bug fixes ~~~~~~~~~ - Fixed bug in :meth:`.GroupBy.mean` with datetimelike values including ``NaT`` values returning incorrect results (:issue:`43132`) - Fixed bug in :meth:`Series.aggregate` not passing the first ``args`` to the user supplied ``func`` in certain cases (:issue:`43357`) +- Fixed memory leaks in :meth:`Series.rolling.quantile` and :meth:`Series.rolling.median` (:issue:`43339`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index 377fec76b8ccc..18ca018829590 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -829,6 +829,7 @@ def roll_median_c(const float64_t[:] values, ndarray[int64_t] start, if not is_monotonic_increasing_bounds: nobs = 0 + skiplist_destroy(sl) sl = skiplist_init(<int>win) skiplist_destroy(sl) @@ -1064,6 +1065,7 @@ def roll_quantile(const float64_t[:] values, ndarray[int64_t] start, if i == 0 or not is_monotonic_increasing_bounds: if not is_monotonic_increasing_bounds: nobs = 0 + skiplist_destroy(skiplist) skiplist = skiplist_init(<int>win) # setup
Backport PR #44032: [PERF] fixing memory leak in aggregation.pyx
https://api.github.com/repos/pandas-dev/pandas/pulls/44055
2021-10-16T17:47:26Z
2021-10-16T19:14:17Z
2021-10-16T19:14:17Z
2021-10-16T19:14:17Z
Fix several typos under pandas/_libs
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 1e05ef443d516..c229c67519a66 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -532,7 +532,7 @@ def group_add(add_t[:, ::1] out, nobs[lab, j] += 1 if nobs[lab, j] == 1: - # i.e. we havent added anything yet; avoid TypeError + # i.e. we haven't added anything yet; avoid TypeError # if e.g. val is a str and sumx[lab, j] is 0 t = val else: @@ -1193,7 +1193,7 @@ def group_rank(float64_t[:, ::1] out, na_option=na_option ) for i in range(len(result)): - # TODO: why cant we do out[:, k] = result? + # TODO: why can't we do out[:, k] = result? out[i, k] = result[i] diff --git a/pandas/_libs/src/klib/khash.h b/pandas/_libs/src/klib/khash.h index 03b11f77580a5..e17d82d51f0fb 100644 --- a/pandas/_libs/src/klib/khash.h +++ b/pandas/_libs/src/klib/khash.h @@ -206,7 +206,7 @@ khuint32_t PANDAS_INLINE murmur2_32to32(khuint32_t k){ } // it is possible to have a special x64-version, which would need less operations, but -// using 32bit version always has also some benifits: +// using 32bit version always has also some benefits: // - one code for 32bit and 64bit builds // - the same case for 32bit and 64bit builds // - no performance difference could be measured compared to a possible x64-version diff --git a/pandas/_libs/src/klib/khash_python.h b/pandas/_libs/src/klib/khash_python.h index c0fca76ef701e..56afea049c1ec 100644 --- a/pandas/_libs/src/klib/khash_python.h +++ b/pandas/_libs/src/klib/khash_python.h @@ -71,7 +71,7 @@ void traced_free(void* ptr){ // The python 3 hash function has the invariant hash(x) == hash(int(x)) == hash(decimal(x)) // and the size of hash may be different by platform / version (long in py2, Py_ssize_t in py3). // We don't need those invariants because types will be cast before hashing, and if Py_ssize_t -// is 64 bits the truncation causes collission issues. Given all that, we use our own +// is 64 bits the truncation causes collision issues. 
Given all that, we use our own // simple hash, viewing the double bytes as an int64 and using khash's default // hash for 64 bit integers. // GH 13436 showed that _Py_HashDouble doesn't work well with khash @@ -338,13 +338,13 @@ khuint32_t PANDAS_INLINE kh_python_hash_func(PyObject* key) { // are possible if (PyFloat_CheckExact(key)) { // we cannot use kh_float64_hash_func - // becase float(k) == k holds for any int-object k + // because float(k) == k holds for any int-object k // and kh_float64_hash_func doesn't respect it hash = floatobject_hash((PyFloatObject*)key); } else if (PyComplex_CheckExact(key)) { // we cannot use kh_complex128_hash_func - // becase complex(k,0) == k holds for any int-object k + // because complex(k,0) == k holds for any int-object k // and kh_complex128_hash_func doesn't respect it hash = complexobject_hash((PyComplexObject*)key); } diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c index b245ae5880ecb..847e84b21c06c 100644 --- a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c +++ b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c @@ -402,7 +402,7 @@ int parse_iso_8601_datetime(const char *str, int len, int want_exc, } parse_timezone: - /* trim any whitepsace between time/timeezone */ + /* trim any whitespace between time/timeezone */ while (sublen > 0 && isspace(*substr)) { ++substr; --sublen; diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 60f90cc17ae34..224c5be1f3b7d 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -257,7 +257,7 @@ cdef object get_dst_info(tzinfo tz): ndarray[int64_t] Nanosecond UTC offsets corresponding to DST transitions. str - Desscribing the type of tzinfo object. + Describing the type of tzinfo object. """ cache_key = tz_cache_key(tz) if cache_key is None:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44043
2021-10-15T16:07:46Z
2021-10-15T18:14:13Z
2021-10-15T18:14:13Z
2021-10-15T21:26:48Z
Fixed metadata propagation in Dataframe.apply (issue #28283)
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 254a004a37c40..4d3317b037e01 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -607,6 +607,7 @@ Reshaping - Bug in :func:`concat` which ignored the ``sort`` parameter (:issue:`43375`) - Fixed bug in :func:`merge` with :class:`MultiIndex` as column index for the ``on`` argument returning an error when assigning a column internally (:issue:`43734`) - Bug in :func:`crosstab` would fail when inputs are lists or tuples (:issue:`44076`) +- Fixed metadata propagation in :meth:`Dataframe.apply` method, consequently fixing the same issue for :meth:`Dataframe.transform`, :meth:`Dataframe.nunique` and :meth:`Dataframe.mode` (:issue:`28283`) Sparse ^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5afb19f1d91fe..0224621bf0329 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8873,7 +8873,7 @@ def apply( args=args, kwargs=kwargs, ) - return op.apply() + return op.apply().__finalize__(self, method="apply") def applymap( self, func: PythonFuncType, na_action: str | None = None, **kwargs diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py index 50ecb74924e2a..c1f8b5dd7cf41 100644 --- a/pandas/tests/generic/test_finalize.py +++ b/pandas/tests/generic/test_finalize.py @@ -233,7 +233,6 @@ ), pytest.param( (pd.DataFrame, frame_data, operator.methodcaller("nunique")), - marks=not_implemented_mark, ), pytest.param( (pd.DataFrame, frame_data, operator.methodcaller("idxmin")), @@ -245,6 +244,9 @@ ), pytest.param( (pd.DataFrame, frame_data, operator.methodcaller("mode")), + ), + pytest.param( + (pd.Series, [0], operator.methodcaller("mode")), marks=not_implemented_mark, ), pytest.param( @@ -467,12 +469,10 @@ frame_mi_data, operator.methodcaller("transform", lambda x: x - x.min()), ), - marks=not_implemented_mark, ), (pd.Series, ([1],), operator.methodcaller("apply", lambda x: x)), pytest.param( 
(pd.DataFrame, frame_mi_data, operator.methodcaller("apply", lambda x: x)), - marks=not_implemented_mark, ), # Cumulative reductions (pd.Series, ([1],), operator.methodcaller("cumsum")),
Co-authored-by: Mohamad Rkein <mohamad_rkein@usp.br> Co-authored-by: Rafael Rodrigues <rrvsrafael@gmail.com> In reference to #28283 - [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44041
2021-10-15T02:53:36Z
2021-10-29T21:57:49Z
2021-10-29T21:57:49Z
2021-10-29T21:57:49Z
DOC: Fix Docstring for DataFrame nlargest `keep` option (#44040)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 023ffb5a5fbda..1bb3dda0312cd 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6584,8 +6584,8 @@ def nlargest(self, n, columns, keep: str = "first") -> DataFrame: keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - - `first` : prioritize the first occurrence(s) - - `last` : prioritize the last occurrence(s) + - ``first`` : prioritize the first occurrence(s) + - ``last`` : prioritize the last occurrence(s) - ``all`` : do not drop any duplicates, even it means selecting more than `n` items.
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] Add double backticks for DataFrame nlargest `keep` option to keep the style consistent From https://pandas.pydata.org/docs/dev/reference/api/pandas.DataFrame.nlargest.html ![image](https://user-images.githubusercontent.com/25895405/137416950-97b0db11-382f-410f-ac90-50ef2b25c32b.png) To ![image](https://user-images.githubusercontent.com/25895405/137420535-275670da-094c-4533-9eac-82a36f52d5f4.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/44040
2021-10-15T02:02:09Z
2021-10-15T07:32:06Z
2021-10-15T07:32:06Z
2021-10-15T08:35:30Z
REF: dispatch DTI/TDI setops to RangeIndex
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index d309dfc21eb95..4cff33f96de27 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -28,6 +28,7 @@ Resolution, Tick, parsing, + to_offset, ) from pandas.compat.numpy import function as nv from pandas.util._decorators import ( @@ -61,6 +62,7 @@ NDArrayBackedExtensionIndex, inherit_names, ) +from pandas.core.indexes.range import RangeIndex from pandas.core.tools.timedeltas import to_timedelta if TYPE_CHECKING: @@ -433,12 +435,61 @@ def values(self) -> np.ndarray: # -------------------------------------------------------------------- # Set Operation Methods + @cache_readonly + def _as_range_index(self) -> RangeIndex: + # Convert our i8 representations to RangeIndex + # Caller is responsible for checking isinstance(self.freq, Tick) + freq = cast(Tick, self.freq) + tick = freq.delta.value + rng = range(self[0].value, self[-1].value + tick, tick) + return RangeIndex(rng) + + def _can_range_setop(self, other): + return isinstance(self.freq, Tick) and isinstance(other.freq, Tick) + + def _wrap_range_setop(self, other, res_i8): + new_freq = None + if not len(res_i8): + # RangeIndex defaults to step=1, which we don't want. + new_freq = self.freq + elif isinstance(res_i8, RangeIndex): + new_freq = to_offset(Timedelta(res_i8.step)) + res_i8 = res_i8 + + # TODO: we cannot just do + # type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq) + # because test_setops_preserve_freq fails with _validate_frequency raising. + # This raising is incorrect, as 'on_freq' is incorrect. This will + # be fixed by GH#41493 + res_values = res_i8.values.view(self._data._ndarray.dtype) + result = type(self._data)._simple_new( + res_values, dtype=self.dtype, freq=new_freq + ) + return self._wrap_setop_result(other, result) + + def _range_intersect(self, other, sort): + # Dispatch to RangeIndex intersection logic. 
+ left = self._as_range_index + right = other._as_range_index + res_i8 = left.intersection(right, sort=sort) + return self._wrap_range_setop(other, res_i8) + + def _range_union(self, other, sort): + # Dispatch to RangeIndex union logic. + left = self._as_range_index + right = other._as_range_index + res_i8 = left.union(right, sort=sort) + return self._wrap_range_setop(other, res_i8) + def _intersection(self, other: Index, sort=False) -> Index: """ intersection specialized to the case with matching dtypes and both non-empty. """ other = cast("DatetimeTimedeltaMixin", other) + if self._can_range_setop(other): + return self._range_intersect(other, sort=sort) + if not self._can_fast_intersect(other): result = Index._intersection(self, other, sort=sort) # We need to invalidate the freq because Index._intersection @@ -453,7 +504,6 @@ def _intersection(self, other: Index, sort=False) -> Index: return self._fast_intersect(other, sort) def _fast_intersect(self, other, sort): - # to make our life easier, "sort" the two ranges if self[0] <= other[0]: left, right = self, other @@ -485,19 +535,9 @@ def _can_fast_intersect(self: _T, other: _T) -> bool: # Because freq is not None, we must then be monotonic decreasing return False - elif self.freq.is_anchored(): - # this along with matching freqs ensure that we "line up", - # so intersection will preserve freq - # GH#42104 - return self.freq.n == 1 - - elif isinstance(self.freq, Tick): - # We "line up" if and only if the difference between two of our points - # is a multiple of our freq - diff = self[0] - other[0] - remainder = diff % self.freq.delta - return remainder == Timedelta(0) - + # this along with matching freqs ensure that we "line up", + # so intersection will preserve freq + # Note we are assuming away Ticks, as those go through _range_intersect # GH#42104 return self.freq.n == 1 @@ -516,6 +556,7 @@ def _can_fast_union(self: _T, other: _T) -> bool: return False if len(self) == 0 or len(other) == 0: + # only reached via 
union_many return True # to make our life easier, "sort" the two ranges @@ -544,10 +585,7 @@ def _fast_union(self: _TDT, other: _TDT, sort=None) -> _TDT: loc = right.searchsorted(left_start, side="left") right_chunk = right._values[:loc] dates = concat_compat((left._values, right_chunk)) - # With sort being False, we can't infer that result.freq == self.freq - # TODO: no tests rely on the _with_freq("infer"); needed? result = type(self)._simple_new(dates, name=self.name) - result = result._with_freq("infer") return result else: left, right = other, self @@ -573,6 +611,9 @@ def _union(self, other, sort): assert isinstance(other, type(self)) assert self.dtype == other.dtype + if self._can_range_setop(other): + return self._range_union(other, sort=sort) + if self._can_fast_union(other): result = self._fast_union(other, sort=sort) # in the case with sort=None, the _can_fast_union check ensures diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 55628ae014ea0..9eb086ed97180 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -562,8 +562,11 @@ def _intersection(self, other: Index, sort=False): if (self.step < 0 and other.step < 0) is not (new_index.step < 0): new_index = new_index[::-1] + if sort is None: - new_index = new_index.sort_values() + # TODO: can revert to just `if sort is None` after GH#43666 + if new_index.step < 0: + new_index = new_index[::-1] return new_index
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry Includes #44019 (but not that whatsnew) I'm optimistic we can eventually dispatch all the relevant cases and then can get rid of the special-cased _fast_union/_fast_intersect.
https://api.github.com/repos/pandas-dev/pandas/pulls/44039
2021-10-15T01:55:22Z
2021-10-16T17:28:17Z
2021-10-16T17:28:16Z
2021-10-16T17:48:07Z
TST: added groupby apply test for nan coerce
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index e07d56931db8c..9e15da1bb0c01 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -1157,3 +1157,24 @@ def test_apply_na(dropna): result = dfgrp.apply(lambda grp_df: grp_df.nlargest(1, "z")) expected = dfgrp.apply(lambda x: x.sort_values("z", ascending=False).head(1)) tm.assert_frame_equal(result, expected) + + +def test_apply_empty_string_nan_coerce_bug(): + # GH#24903 + result = ( + DataFrame( + { + "a": [1, 1, 2, 2], + "b": ["", "", "", ""], + "c": pd.to_datetime([1, 2, 3, 4], unit="s"), + } + ) + .groupby(["a", "b"]) + .apply(lambda df: df.iloc[-1]) + ) + expected = DataFrame( + [[1, "", pd.to_datetime(2, unit="s")], [2, "", pd.to_datetime(4, unit="s")]], + columns=["a", "b", "c"], + index=MultiIndex.from_tuples([(1, ""), (2, "")], names=["a", "b"]), + ) + tm.assert_frame_equal(result, expected)
- [x] closes #24903 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] Bug was reported in #24903 that showed empty strings being coerced into Nan when certain apply was used with groupby. Although bug has been fixed, this test identifies the issue if it were to happen again.
https://api.github.com/repos/pandas-dev/pandas/pulls/44038
2021-10-15T01:04:31Z
2021-10-16T15:36:13Z
2021-10-16T15:36:13Z
2021-10-16T15:36:17Z
DOC: Document and annotate Index.reindex (#40328).
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f6bb8e7af3558..2dbfb23c9b785 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3947,6 +3947,27 @@ def reindex( Parameters ---------- target : an iterable + method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional + * default: exact matches only. + * pad / ffill: find the PREVIOUS index value if no exact match. + * backfill / bfill: use NEXT index value if no exact match + * nearest: use the NEAREST index value if no exact match. Tied + distances are broken by preferring the larger index value. + level : int, optional + Level of multiindex. + limit : int, optional + Maximum number of consecutive labels in ``target`` to match for + inexact matches. + tolerance : int or float, optional + Maximum distance between original and new labels for inexact + matches. The values of the index at the matching locations must + satisfy the equation ``abs(index[indexer] - target) <= tolerance``. + + Tolerance may be a scalar value, which applies the same tolerance + to all values, or list-like, which applies variable tolerance per + element. List-like includes list, tuple, array, Series, and must be + the same size as the index and its dtype must exactly match the + index's type. Returns ------- @@ -3954,6 +3975,28 @@ def reindex( Resulting index. indexer : np.ndarray[np.intp] or None Indices of output values in original index. + + Raises + ------ + TypeError + If ``method`` passed along with ``level``. + ValueError + If non-unique multi-index + ValueError + If non-unique index and ``method`` or ``limit`` passed. 
+ + See Also + -------- + Series.reindex + DataFrame.reindex + + Examples + -------- + >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) + >>> idx + Index(['car', 'bike', 'train', 'tractor'], dtype='object') + >>> idx.reindex(['car', 'bike']) + (Index(['car', 'bike'], dtype='object'), array([0, 1])) """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series).
- [x] closes #40328 - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/44037
2021-10-14T22:10:42Z
2021-10-16T15:35:07Z
2021-10-16T15:35:06Z
2021-10-16T15:47:43Z
Backport PR #43729: DEPS: Upgrade Deps for Python 3.10
diff --git a/.github/actions/build_pandas/action.yml b/.github/actions/build_pandas/action.yml index d4777bcd1d079..2e4bfea165316 100644 --- a/.github/actions/build_pandas/action.yml +++ b/.github/actions/build_pandas/action.yml @@ -13,5 +13,5 @@ runs: - name: Build Pandas run: | python setup.py build_ext -j 2 - python -m pip install -e . --no-build-isolation --no-use-pep517 + python -m pip install -e . --no-build-isolation --no-use-pep517 --no-index shell: bash -l {0} diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index acb574f2ab8c5..78506e3cb61ce 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -23,7 +23,10 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9"] + python-version: ["3.7", "3.8", "3.9", "3.10"] + concurrency: + group: ${{github.ref}}-${{matrix.python-version}}-sdist + cancel-in-progress: ${{github.event_name == 'pull_request'}} steps: - uses: actions/checkout@v2 @@ -50,13 +53,26 @@ jobs: - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: pandas-sdist - python-version: ${{ matrix.python-version }} + python-version: '${{ matrix.python-version }}' - name: Install pandas from sdist run: | - conda list + pip list python -m pip install dist/*.gz + - name: Force oldest supported NumPy + run: | + case "${{matrix.python-version}}" in + 3.7) + pip install numpy==1.17.3 ;; + 3.8) + pip install numpy==1.18.5 ;; + 3.9) + pip install numpy==1.19.3 ;; + 3.10) + pip install numpy==1.21.2 ;; + esac + - name: Import pandas run: | cd .. 
diff --git a/ci/deps/actions-37-db-min.yaml b/ci/deps/actions-37-db-min.yaml index cae4361ca37a7..b95a39f27acb2 100644 --- a/ci/deps/actions-37-db-min.yaml +++ b/ci/deps/actions-37-db-min.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.21 diff --git a/ci/deps/actions-37-db.yaml b/ci/deps/actions-37-db.yaml index 9d680cb8338fd..73d3bf2dcc70a 100644 --- a/ci/deps/actions-37-db.yaml +++ b/ci/deps/actions-37-db.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/actions-37-locale_slow.yaml b/ci/deps/actions-37-locale_slow.yaml index c6eb3b00a63ac..6968c5dcef414 100644 --- a/ci/deps/actions-37-locale_slow.yaml +++ b/ci/deps/actions-37-locale_slow.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.21 diff --git a/ci/deps/actions-37-minimum_versions.yaml b/ci/deps/actions-37-minimum_versions.yaml index b97601d18917c..4c3c5ca8b906c 100644 --- a/ci/deps/actions-37-minimum_versions.yaml +++ b/ci/deps/actions-37-minimum_versions.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.7.1 # tools - - cython=0.29.21 + - cython=0.29.24 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.21 diff --git a/ci/deps/actions-37-slow.yaml b/ci/deps/actions-37-slow.yaml index 76eb7ba5693e9..b81aff268f090 100644 --- a/ci/deps/actions-37-slow.yaml +++ b/ci/deps/actions-37-slow.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.21 diff --git a/ci/deps/actions-37.yaml b/ci/deps/actions-37.yaml index 2272f8470e209..92868b89c5f05 100644 --- a/ci/deps/actions-37.yaml +++ b/ci/deps/actions-37.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-cov - 
pytest-xdist>=1.21 diff --git a/ci/deps/actions-38-locale.yaml b/ci/deps/actions-38-locale.yaml index dfed0df77a327..2a3abe0d8e80b 100644 --- a/ci/deps/actions-38-locale.yaml +++ b/ci/deps/actions-38-locale.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.8.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.21 diff --git a/ci/deps/actions-38-numpydev.yaml b/ci/deps/actions-38-numpydev.yaml index e943053f15600..ad2bd88177650 100644 --- a/ci/deps/actions-38-numpydev.yaml +++ b/ci/deps/actions-38-numpydev.yaml @@ -15,7 +15,7 @@ dependencies: - pytz - pip - pip: - - cython==0.29.21 # GH#34014 + - cython==0.29.24 # GH#34014 - "--extra-index-url https://pypi.anaconda.org/scipy-wheels-nightly/simple" - "--pre" - "numpy" diff --git a/ci/deps/actions-38-slow.yaml b/ci/deps/actions-38-slow.yaml index c464b30e02203..24e132b21cd77 100644 --- a/ci/deps/actions-38-slow.yaml +++ b/ci/deps/actions-38-slow.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.8.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.21 diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml index 11daa92046eb4..37139982aec0a 100644 --- a/ci/deps/actions-38.yaml +++ b/ci/deps/actions-38.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.8.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.21 diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 03f2bc84bcc01..5ca449d537df3 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.9.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.21 diff --git a/ci/deps/azure-macos-37.yaml b/ci/deps/azure-macos-37.yaml index 43e1055347f17..ebf8bcef51721 100644 --- a/ci/deps/azure-macos-37.yaml +++ b/ci/deps/azure-macos-37.yaml @@ -32,6 +32,6 @@ dependencies: - xlwt - pip - pip: - - cython>=0.29.21 + - cython>=0.29.24 - pyreadstat - pyxlsb 
diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 4df55813ea21c..9af67f8bb272b 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index 70aa46e8a5851..52a5de70f85dc 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.8.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/circle-37-arm64.yaml b/ci/deps/circle-37-arm64.yaml index 995ebda1f97e7..6483fcff85407 100644 --- a/ci/deps/circle-37-arm64.yaml +++ b/ci/deps/circle-37-arm64.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.21 + - cython>=0.29.24 - pytest>=6.0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst index 7fa24ab6225df..9c9aacc4b0f52 100644 --- a/doc/source/whatsnew/v1.3.4.rst +++ b/doc/source/whatsnew/v1.3.4.rst @@ -42,7 +42,7 @@ Bug fixes Other ~~~~~ -- +- The minimum version of Cython needed to compile pandas is now ``0.29.24`` (:issue:`43729`) - .. 
--------------------------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index 28925ce62a761..991bbfe8a60bd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,18 +4,8 @@ requires = [ "setuptools>=51.0.0", "wheel", - "Cython>=0.29.21,<3", # Note: sync with setup.py - # Numpy requirements for different OS/architectures - # Copied from https://github.com/scipy/scipy/blob/master/pyproject.toml (which is also licensed under BSD) - "numpy==1.17.3; python_version=='3.7' and (platform_machine!='arm64' or platform_system!='Darwin') and platform_machine!='aarch64'", - "numpy==1.18.3; python_version=='3.8' and (platform_machine!='arm64' or platform_system!='Darwin') and platform_machine!='aarch64'", - "numpy==1.19.3; python_version>='3.9' and (platform_machine!='arm64' or platform_system!='Darwin')", - # Aarch64(Python 3.9 requirements are the same as AMD64) - "numpy==1.19.2; python_version=='3.7' and platform_machine=='aarch64'", - "numpy==1.19.2; python_version=='3.8' and platform_machine=='aarch64'", - # Darwin Arm64 - "numpy>=1.20.0; python_version=='3.8' and platform_machine=='arm64' and platform_system=='Darwin'", - "numpy>=1.20.0; python_version=='3.9' and platform_machine=='arm64' and platform_system=='Darwin'" + "Cython>=0.29.24,<3", # Note: sync with setup.py + "oldest-supported-numpy>=0.10" ] # uncomment to enable pep517 after versioneer problem is fixed. 
# https://github.com/python-versioneer/python-versioneer/issues/193 diff --git a/setup.cfg b/setup.cfg index 6ce66a6f2bdbd..35aaa086d8e3a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -22,6 +22,7 @@ classifiers = Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 Topic :: Scientific/Engineering project_urls = Bug Tracker = https://github.com/pandas-dev/pandas/issues @@ -31,7 +32,10 @@ project_urls = [options] packages = find: install_requires = - numpy>=1.17.3 + numpy>=1.17.3; platform_machine!='aarch64' and platform_machine!='arm64' and python_version<'3.10' + numpy>=1.19.2; platform_machine=='aarch64' and python_version<'3.10' + numpy>=1.20.0; platform_machine=='arm64' and python_version<'3.10' + numpy>=1.21.0; python_version>='3.10' python-dateutil>=2.7.3 pytz>=2017.3 python_requires = >=3.7.1 diff --git a/setup.py b/setup.py index 337719053585c..f5151621c9efe 100755 --- a/setup.py +++ b/setup.py @@ -37,7 +37,7 @@ def is_platform_mac(): return sys.platform == "darwin" -min_cython_ver = "0.29.21" # note: sync with pyproject.toml +min_cython_ver = "0.29.24" # note: sync with pyproject.toml try: from Cython import (
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry **THIS IS A MANUAL BACKPORT OF LARGE BUILD CHANGES. PLEASE REVIEW CAREFULLY BEFORE MERGING**
https://api.github.com/repos/pandas-dev/pandas/pulls/44036
2021-10-14T21:40:29Z
2021-10-15T01:33:26Z
2021-10-15T01:33:26Z
2021-10-15T02:24:37Z
TST: adds test for .loc on multiindex for series GH43908
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index 104fa2da7a67e..d036773c778e6 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -912,3 +912,11 @@ def test_loc_keyerror_rightmost_key_missing(): df = df.set_index(["A", "B"]) with pytest.raises(KeyError, match="^1$"): df.loc[(100, 1)] + + +def test_multindex_series_loc_with_tuple_label(): + # GH#43908 + mi = MultiIndex.from_tuples([(1, 2), (3, (4, 5))]) + ser = Series([1, 2], index=mi) + result = ser.loc[(3, (4, 5))] + assert result == 2
- [x] closes #43908 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/44035
2021-10-14T21:01:33Z
2021-10-15T18:15:20Z
2021-10-15T18:15:20Z
2021-10-15T18:15:35Z
CLN: no need for suffices anymore in test_hashtable.py
diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py index 8b7304a84c27b..bdc02ff0aa7a8 100644 --- a/pandas/tests/libs/test_hashtable.py +++ b/pandas/tests/libs/test_hashtable.py @@ -370,96 +370,85 @@ def test_unique_for_nan_objects_tuple(): assert len(unique) == 2 -def get_ht_function(fun_name, type_suffix): - return getattr(ht, fun_name) - - @pytest.mark.parametrize( - "dtype, type_suffix", + "dtype", [ - (np.object_, "object"), - (np.complex128, "complex128"), - (np.int64, "int64"), - (np.uint64, "uint64"), - (np.float64, "float64"), - (np.complex64, "complex64"), - (np.int32, "int32"), - (np.uint32, "uint32"), - (np.float32, "float32"), - (np.int16, "int16"), - (np.uint16, "uint16"), - (np.int8, "int8"), - (np.uint8, "uint8"), - (np.intp, "intp"), + np.object_, + np.complex128, + np.int64, + np.uint64, + np.float64, + np.complex64, + np.int32, + np.uint32, + np.float32, + np.int16, + np.uint16, + np.int8, + np.uint8, + np.intp, ], ) class TestHelpFunctions: - def test_value_count(self, dtype, type_suffix, writable): + def test_value_count(self, dtype, writable): N = 43 - value_count = get_ht_function("value_count", type_suffix) expected = (np.arange(N) + N).astype(dtype) values = np.repeat(expected, 5) values.flags.writeable = writable - keys, counts = value_count(values, False) + keys, counts = ht.value_count(values, False) tm.assert_numpy_array_equal(np.sort(keys), expected) assert np.all(counts == 5) - def test_value_count_stable(self, dtype, type_suffix, writable): + def test_value_count_stable(self, dtype, writable): # GH12679 - value_count = get_ht_function("value_count", type_suffix) values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype) values.flags.writeable = writable - keys, counts = value_count(values, False) + keys, counts = ht.value_count(values, False) tm.assert_numpy_array_equal(keys, values) assert np.all(counts == 1) - def test_duplicated_first(self, dtype, type_suffix, writable): + def 
test_duplicated_first(self, dtype, writable): N = 100 - duplicated = get_ht_function("duplicated", type_suffix) values = np.repeat(np.arange(N).astype(dtype), 5) values.flags.writeable = writable - result = duplicated(values) + result = ht.duplicated(values) expected = np.ones_like(values, dtype=np.bool_) expected[::5] = False tm.assert_numpy_array_equal(result, expected) - def test_ismember_yes(self, dtype, type_suffix, writable): + def test_ismember_yes(self, dtype, writable): N = 127 - ismember = get_ht_function("ismember", type_suffix) arr = np.arange(N).astype(dtype) values = np.arange(N).astype(dtype) arr.flags.writeable = writable values.flags.writeable = writable - result = ismember(arr, values) + result = ht.ismember(arr, values) expected = np.ones_like(values, dtype=np.bool_) tm.assert_numpy_array_equal(result, expected) - def test_ismember_no(self, dtype, type_suffix): + def test_ismember_no(self, dtype): N = 17 - ismember = get_ht_function("ismember", type_suffix) arr = np.arange(N).astype(dtype) values = (np.arange(N) + N).astype(dtype) - result = ismember(arr, values) + result = ht.ismember(arr, values) expected = np.zeros_like(values, dtype=np.bool_) tm.assert_numpy_array_equal(result, expected) - def test_mode(self, dtype, type_suffix, writable): + def test_mode(self, dtype, writable): if dtype in (np.int8, np.uint8): N = 53 else: N = 11111 - mode = get_ht_function("mode", type_suffix) values = np.repeat(np.arange(N).astype(dtype), 5) values[0] = 42 values.flags.writeable = writable - result = mode(values, False) + result = ht.mode(values, False) assert result == 42 - def test_mode_stable(self, dtype, type_suffix, writable): - mode = get_ht_function("mode", type_suffix) + def test_mode_stable(self, dtype, writable): values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype) values.flags.writeable = writable - keys = mode(values, False) + keys = ht.mode(values, False) tm.assert_numpy_array_equal(keys, values) @@ -482,52 +471,47 @@ def 
test_unique_label_indices_intp(writable): @pytest.mark.parametrize( - "dtype, type_suffix", + "dtype", [ - (np.float64, "float64"), - (np.float32, "float32"), - (np.complex128, "complex128"), - (np.complex64, "complex64"), + np.float64, + np.float32, + np.complex128, + np.complex64, ], ) class TestHelpFunctionsWithNans: - def test_value_count(self, dtype, type_suffix): - value_count = get_ht_function("value_count", type_suffix) + def test_value_count(self, dtype): values = np.array([np.nan, np.nan, np.nan], dtype=dtype) - keys, counts = value_count(values, True) + keys, counts = ht.value_count(values, True) assert len(keys) == 0 - keys, counts = value_count(values, False) + keys, counts = ht.value_count(values, False) assert len(keys) == 1 and np.all(np.isnan(keys)) assert counts[0] == 3 - def test_duplicated_first(self, dtype, type_suffix): - duplicated = get_ht_function("duplicated", type_suffix) + def test_duplicated_first(self, dtype): values = np.array([np.nan, np.nan, np.nan], dtype=dtype) - result = duplicated(values) + result = ht.duplicated(values) expected = np.array([False, True, True]) tm.assert_numpy_array_equal(result, expected) - def test_ismember_yes(self, dtype, type_suffix): - ismember = get_ht_function("ismember", type_suffix) + def test_ismember_yes(self, dtype): arr = np.array([np.nan, np.nan, np.nan], dtype=dtype) values = np.array([np.nan, np.nan], dtype=dtype) - result = ismember(arr, values) + result = ht.ismember(arr, values) expected = np.array([True, True, True], dtype=np.bool_) tm.assert_numpy_array_equal(result, expected) - def test_ismember_no(self, dtype, type_suffix): - ismember = get_ht_function("ismember", type_suffix) + def test_ismember_no(self, dtype): arr = np.array([np.nan, np.nan, np.nan], dtype=dtype) values = np.array([1], dtype=dtype) - result = ismember(arr, values) + result = ht.ismember(arr, values) expected = np.array([False, False, False], dtype=np.bool_) tm.assert_numpy_array_equal(result, expected) - def 
test_mode(self, dtype, type_suffix): - mode = get_ht_function("mode", type_suffix) + def test_mode(self, dtype): values = np.array([42, np.nan, np.nan, np.nan], dtype=dtype) - assert mode(values, True) == 42 - assert np.isnan(mode(values, False)) + assert ht.mode(values, True) == 42 + assert np.isnan(ht.mode(values, False)) def test_ismember_tuple_with_nans():
- [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry Since we have version with fused types, there is no need to call precise version (i.e. with suffices like int64, int32 and so on) any longer.
https://api.github.com/repos/pandas-dev/pandas/pulls/44034
2021-10-14T20:29:13Z
2021-10-15T18:16:32Z
2021-10-15T18:16:32Z
2021-10-15T18:16:47Z
[PERF] fixing memory leak in aggregation.pyx
diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst index 9c9aacc4b0f52..b6ee2d57a0965 100644 --- a/doc/source/whatsnew/v1.3.4.rst +++ b/doc/source/whatsnew/v1.3.4.rst @@ -35,6 +35,7 @@ Bug fixes ~~~~~~~~~ - Fixed bug in :meth:`.GroupBy.mean` with datetimelike values including ``NaT`` values returning incorrect results (:issue:`43132`) - Fixed bug in :meth:`Series.aggregate` not passing the first ``args`` to the user supplied ``func`` in certain cases (:issue:`43357`) +- Fixed memory leaks in :meth:`Series.rolling.quantile` and :meth:`Series.rolling.median` (:issue:`43339`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index 1941a3c4a37f0..98201a6f58499 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -835,6 +835,7 @@ def roll_median_c(const float64_t[:] values, ndarray[int64_t] start, if not is_monotonic_increasing_bounds: nobs = 0 + skiplist_destroy(sl) sl = skiplist_init(<int>win) skiplist_destroy(sl) @@ -1070,6 +1071,7 @@ def roll_quantile(const float64_t[:] values, ndarray[int64_t] start, if i == 0 or not is_monotonic_increasing_bounds: if not is_monotonic_increasing_bounds: nobs = 0 + skiplist_destroy(skiplist) skiplist = skiplist_init(<int>win) # setup
- [x] closes #43339 - [x] tests passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry The old memory needs to be released, before the pointer switches to the new list.
https://api.github.com/repos/pandas-dev/pandas/pulls/44032
2021-10-14T20:06:26Z
2021-10-16T17:46:50Z
2021-10-16T17:46:50Z
2021-10-16T17:47:33Z
Temporarily add back Index._get_attributes_dict for dask compat
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f6bb8e7af3558..cc0521caaaf6c 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -731,6 +731,22 @@ def _format_duplicate_message(self) -> DataFrame: # -------------------------------------------------------------------- # Index Internals Methods + @final + def _get_attributes_dict(self) -> dict[str_t, Any]: + """ + Return an attributes dict for my class. + + Temporarily added back for compatibility issue in dask, see + https://github.com/pandas-dev/pandas/pull/43895 + """ + warnings.warn( + "The Index._get_attributes_dict method is deprecated, and will be " + "removed in a future version", + DeprecationWarning, + stacklevel=2, + ) + return {k: getattr(self, k, None) for k in self._attributes} + def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT: """ Create a new Index with the same class as the caller, don't copy the diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 4b7a377570fd5..cbcb00a4230cc 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1788,3 +1788,11 @@ def test_drop_duplicates_pos_args_deprecation(): result = idx.drop_duplicates("last") expected = Index([2, 3, 1]) tm.assert_index_equal(expected, result) + + +def test_get_attributes_dict_deprecated(): + # https://github.com/pandas-dev/pandas/pull/44028 + idx = Index([1, 2, 3, 1]) + with tm.assert_produces_warning(DeprecationWarning): + attrs = idx._get_attributes_dict() + assert attrs == {"name": None}
See https://github.com/pandas-dev/pandas/pull/43895#issuecomment-938472299. There was of course no need to revert the full PR, just to add this method again.
https://api.github.com/repos/pandas-dev/pandas/pulls/44028
2021-10-14T09:48:22Z
2021-10-14T22:47:57Z
2021-10-14T22:47:57Z
2021-10-14T22:48:00Z
Bug multiple css selectors GH44011
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index c5a22d8766a96..3d89911434bfa 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -544,6 +544,8 @@ Styler - Bug when rendering an empty DataFrame with a named index (:issue:`43305`). - Bug when rendering a single level MultiIndex (:issue:`43383`). - Bug when combining non-sparse rendering and :meth:`.Styler.hide_columns` or :meth:`.Styler.hide_index` (:issue:`43464`) +- Bug setting a table style when using multiple selectors in :class:`.Styler` (:issue:`44011`) +- Other ^^^^^ diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 8ffb8105d4a90..c8b2fbe169df0 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -53,6 +53,7 @@ StylerRenderer, Subset, Tooltips, + format_table_styles, maybe_convert_css_to_tuples, non_reducing_slice, refactor_levels, @@ -2047,7 +2048,7 @@ def set_table_styles( } for key, styles in table_styles.items() for idx in obj.get_indexer_for([key]) - for s in styles + for s in format_table_styles(styles) ] else: table_styles = [ diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index cfe5b2c2bdfab..0c9259ef5724f 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -226,7 +226,7 @@ def _translate( # construct render dict d = { "uuid": self.uuid, - "table_styles": _format_table_styles(self.table_styles or []), + "table_styles": format_table_styles(self.table_styles or []), "caption": self.caption, } @@ -1173,7 +1173,7 @@ def _is_visible(idx_row, idx_col, lengths) -> bool: return (idx_col, idx_row) in lengths -def _format_table_styles(styles: CSSStyles) -> CSSStyles: +def format_table_styles(styles: CSSStyles) -> CSSStyles: """ looks for multiple CSS selectors and separates them: [{'selector': 'td, th', 'props': 'a:v;'}] diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py 
index 6a09018c5b20b..1e2131bb7d792 100644 --- a/pandas/tests/io/formats/style/test_style.py +++ b/pandas/tests/io/formats/style/test_style.py @@ -829,6 +829,19 @@ def test_table_styles_multiple(self): {"selector": "tr", "props": [("color", "green")]}, ] + def test_table_styles_dict_multiple_selectors(self): + # GH 44011 + result = self.df.style.set_table_styles( + [{"selector": "th,td", "props": [("border-left", "2px solid black")]}] + )._translate(True, True)["table_styles"] + + expected = [ + {"selector": "th", "props": [("border-left", "2px solid black")]}, + {"selector": "td", "props": [("border-left", "2px solid black")]}, + ] + + assert result == expected + def test_maybe_convert_css_to_tuples(self): expected = [("a", "b"), ("c", "d e")] assert maybe_convert_css_to_tuples("a:b;c:d e;") == expected
- [x ] closes #44011 - [x ] tests added / passed - [x ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44023
2021-10-14T00:59:05Z
2021-10-16T13:58:11Z
2021-10-16T13:58:10Z
2021-10-16T13:58:15Z
DOC: DatetimeArray.std
diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst index 6e58f487d5f4a..1ce75f5aac877 100644 --- a/doc/source/reference/indexing.rst +++ b/doc/source/reference/indexing.rst @@ -407,6 +407,7 @@ Methods :toctree: api/ DatetimeIndex.mean + DatetimeIndex.std TimedeltaIndex -------------- diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index eb7638df301f7..71d38d3b3f73b 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1923,6 +1923,26 @@ def std( keepdims: bool = False, skipna: bool = True, ): + """ + Return sample standard deviation over requested axis. + + Normalized by N-1 by default. This can be changed using the ddof argument + + Parameters + ---------- + axis : int optional, default None + Axis for the function to be applied on. + ddof : int, default 1 + Degrees of Freedom. The divisor used in calculations is N - ddof, + where N represents the number of elements. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result will be + NA. + + Returns + ------- + Timedelta + """ # Because std is translation-invariant, we can get self.std # by calculating (self - Timestamp(0)).std, and we can do it # without creating a copy by using a view on self._ndarray
`DatetimeIndex.std` is missing a doc-string. Add doc-string to `DatetimeArray.std` as `DatetimeIndex` is re-using `DatetimeArray.std`. There are a few more keywords, but they seem to be not implemented (numpy-compatiblity?) ```py pd.DatetimeIndex([1, 2]).std(out=[]) ValueError: the 'out' parameter is not supported in the pandas implementation of std() pd.DatetimeIndex([1, 2]).std(keepdims=True) ValueError: the 'keepdims' parameter is not supported in the pandas implementation of std() pd.DatetimeIndex([1, 2]).std(dtype=np.dtype("datetime64[ns]")) ValueError: the 'dtype' parameter is not supported in the pandas implementation of std() ``` For some reason, this doc-string is needed for #43828.
https://api.github.com/repos/pandas-dev/pandas/pulls/44020
2021-10-13T22:44:08Z
2021-10-18T19:54:31Z
2021-10-18T19:54:31Z
2022-04-01T01:36:48Z
BUG: RangeIndex.union
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index c5a22d8766a96..d17329902f343 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -548,6 +548,7 @@ Styler Other ^^^^^ - Bug in :meth:`CustomBusinessMonthBegin.__add__` (:meth:`CustomBusinessMonthEnd.__add__`) not applying the extra ``offset`` parameter when beginning (end) of the target month is already a business day (:issue:`41356`) +- Bug in :meth:`RangeIndex.union` with another ``RangeIndex`` with matching (even) ``step`` and starts differing by strictly less than ``step / 2`` (:issue:`44019`) .. ***DO NOT USE THIS SECTION*** diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 90649ad2dcbc1..55628ae014ea0 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -635,10 +635,13 @@ def _union(self, other: Index, sort): return type(self)(start_r, end_r + step_s, step_s) if ( (step_s % 2 == 0) - and (abs(start_s - start_o) <= step_s / 2) - and (abs(end_s - end_o) <= step_s / 2) + and (abs(start_s - start_o) == step_s / 2) + and (abs(end_s - end_o) == step_s / 2) ): + # e.g. 
range(0, 10, 2) and range(1, 11, 2) + # but not range(0, 20, 4) and range(1, 21, 4) GH#44019 return type(self)(start_r, end_r + step_s / 2, step_s / 2) + elif step_o % step_s == 0: if ( (start_o - start_s) % step_s == 0 diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py index e81271d8ee306..07ec70c109f67 100644 --- a/pandas/tests/indexes/ranges/test_setops.py +++ b/pandas/tests/indexes/ranges/test_setops.py @@ -290,6 +290,15 @@ def test_union_sorted(self, unions): tm.assert_index_equal(res2, expected_sorted, exact=True) tm.assert_index_equal(res3, expected_sorted, exact="equiv") + def test_union_same_step_misaligned(self): + # GH#44019 + left = RangeIndex(range(0, 20, 4)) + right = RangeIndex(range(1, 21, 4)) + + result = left.union(right) + expected = Int64Index([0, 1, 4, 5, 8, 9, 12, 13, 16, 17]) + tm.assert_index_equal(result, expected, exact=True) + def test_difference(self): # GH#12034 Cases where we operate against another RangeIndex and may # get back another RangeIndex
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44019
2021-10-13T22:12:03Z
2021-10-16T15:39:08Z
2021-10-16T15:39:08Z
2021-10-16T16:10:04Z
DOC: Upload cheatsheets to site
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c8f5f0385732f..20f7712131ba4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -137,7 +137,7 @@ jobs: if: ${{github.event_name == 'push' && github.ref == 'refs/heads/master'}} - name: Upload web - run: rsync -az --delete --exclude='pandas-docs' --exclude='docs' --exclude='Pandas_Cheat_Sheet*' web/build/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas + run: rsync -az --delete --exclude='pandas-docs' --exclude='docs' web/build/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas if: ${{github.event_name == 'push' && github.ref == 'refs/heads/master'}} - name: Upload dev docs @@ -146,6 +146,10 @@ jobs: - name: Move docs into site directory run: mv doc/build/html web/build/docs + + - name: Copy cheatsheets into site directory + run: cp doc/cheatsheet/Pandas_Cheat_Sheet* web/build/ + - name: Save website as an artifact uses: actions/upload-artifact@v2 with:
- [x] closes #40949 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry I went ahead and modified the `.github/workflows/ci.yml` so that the cheatsheets are copied to the docs build directory once the Sphinx site is built
https://api.github.com/repos/pandas-dev/pandas/pulls/44018
2021-10-13T17:24:19Z
2021-10-18T15:16:36Z
2021-10-18T15:16:36Z
2021-10-28T16:36:12Z
Backport PR #44012 on branch 1.3.x (DOC: minor fixup of 1.3.4 release notes)
diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst index 6f07dc3e1e2f9..7fa24ab6225df 100644 --- a/doc/source/whatsnew/v1.3.4.rst +++ b/doc/source/whatsnew/v1.3.4.rst @@ -15,13 +15,13 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :meth:`.GroupBy.agg` where it was failing silently with mixed data types along ``axis=1`` and :class:`MultiIndex` (:issue:`43209`) -- Fixed regression in :meth:`merge` with integer and ``NaN`` keys failing with ``outer`` merge (:issue:`43550`) +- Fixed regression in :func:`merge` with integer and ``NaN`` keys failing with ``outer`` merge (:issue:`43550`) - Fixed regression in :meth:`DataFrame.corr` raising ``ValueError`` with ``method="spearman"`` on 32-bit platforms (:issue:`43588`) - Fixed performance regression in :meth:`MultiIndex.equals` (:issue:`43549`) - Fixed performance regression in :meth:`.GroupBy.first` and :meth:`.GroupBy.last` with :class:`StringDtype` (:issue:`41596`) - Fixed regression in :meth:`Series.cat.reorder_categories` failing to update the categories on the ``Series`` (:issue:`43232`) - Fixed regression in :meth:`Series.cat.categories` setter failing to update the categories on the ``Series`` (:issue:`43334`) -- Fixed regression in :meth:`pandas.read_csv` raising ``UnicodeDecodeError`` exception when ``memory_map=True`` (:issue:`43540`) +- Fixed regression in :func:`read_csv` raising ``UnicodeDecodeError`` exception when ``memory_map=True`` (:issue:`43540`) - Fixed regression in :meth:`DataFrame.explode` raising ``AssertionError`` when ``column`` is any scalar which is not a string (:issue:`43314`) - Fixed regression in :meth:`Series.aggregate` attempting to pass ``args`` and ``kwargs`` multiple times to the user supplied ``func`` in certain cases (:issue:`43357`) - Fixed regression when iterating over a :class:`DataFrame.groupby.rolling` object causing the resulting DataFrames to have an incorrect index if the input groupings were not 
sorted (:issue:`43386`)
Backport PR #44012: DOC: minor fixup of 1.3.4 release notes
https://api.github.com/repos/pandas-dev/pandas/pulls/44016
2021-10-13T15:30:23Z
2021-10-13T18:03:49Z
2021-10-13T18:03:49Z
2021-10-13T18:03:49Z
DOC: minor fixup of 1.3.4 release notes
diff --git a/doc/source/whatsnew/v1.3.4.rst b/doc/source/whatsnew/v1.3.4.rst index 963aaa7f9189f..9c9aacc4b0f52 100644 --- a/doc/source/whatsnew/v1.3.4.rst +++ b/doc/source/whatsnew/v1.3.4.rst @@ -15,13 +15,13 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :meth:`.GroupBy.agg` where it was failing silently with mixed data types along ``axis=1`` and :class:`MultiIndex` (:issue:`43209`) -- Fixed regression in :meth:`merge` with integer and ``NaN`` keys failing with ``outer`` merge (:issue:`43550`) +- Fixed regression in :func:`merge` with integer and ``NaN`` keys failing with ``outer`` merge (:issue:`43550`) - Fixed regression in :meth:`DataFrame.corr` raising ``ValueError`` with ``method="spearman"`` on 32-bit platforms (:issue:`43588`) - Fixed performance regression in :meth:`MultiIndex.equals` (:issue:`43549`) - Fixed performance regression in :meth:`.GroupBy.first` and :meth:`.GroupBy.last` with :class:`StringDtype` (:issue:`41596`) - Fixed regression in :meth:`Series.cat.reorder_categories` failing to update the categories on the ``Series`` (:issue:`43232`) - Fixed regression in :meth:`Series.cat.categories` setter failing to update the categories on the ``Series`` (:issue:`43334`) -- Fixed regression in :meth:`pandas.read_csv` raising ``UnicodeDecodeError`` exception when ``memory_map=True`` (:issue:`43540`) +- Fixed regression in :func:`read_csv` raising ``UnicodeDecodeError`` exception when ``memory_map=True`` (:issue:`43540`) - Fixed regression in :meth:`DataFrame.explode` raising ``AssertionError`` when ``column`` is any scalar which is not a string (:issue:`43314`) - Fixed regression in :meth:`Series.aggregate` attempting to pass ``args`` and ``kwargs`` multiple times to the user supplied ``func`` in certain cases (:issue:`43357`) - Fixed regression when iterating over a :class:`DataFrame.groupby.rolling` object causing the resulting DataFrames to have an incorrect index if the input groupings were not 
sorted (:issue:`43386`)
null
https://api.github.com/repos/pandas-dev/pandas/pulls/44012
2021-10-13T11:52:55Z
2021-10-13T15:29:56Z
2021-10-13T15:29:55Z
2021-10-13T16:22:01Z
DOC: Added DataFrame in Parameters & Return description in the docstring
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 8b37026e16171..21357f37853d9 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -692,7 +692,8 @@ def to_datetime( Parameters ---------- arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like - The object to convert to a datetime. + The object to convert to a datetime. If the DataFrame is provided, the method + expects minimally the following columns: "year", "month", "day". errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaT. @@ -775,6 +776,7 @@ def to_datetime( - DatetimeIndex, if timezone naive or aware with the same timezone - Index of object dtype, if timezone aware with mixed time offsets - Series: Series of datetime64 dtype + - DataFrame: Series of datetime64 dtype - scalar: Timestamp In case when it is not possible to return designated types (e.g. when
- [x] closes #41676 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry Hello! I have added the following information to the source code (docstring of to_datetime method): 1. In case when DataFrame is passed as a parameter, the to_datetime method expects the following column names (minimally "year", "month", "day"). 2. The method returns Series when the DataFrame is passed. I have created Docker Image & Python Environment, then tested my docstring update by using `validate_docstrings.py` script. Before testing my update, I have run the script on master branch. The script found 2 errors on master branch: ``` 2 Errors found: No extended summary found flake8 error: E999 SyntaxError: invalid syntax ``` Then, I switched to my [feature branch](https://github.com/abatomunkuev/pandas/tree/issue%2341676) and run the `validate_docstrings.py` script. Output of the script ``` ################################################################################ ######################## Docstring (pandas.to_datetime) ######################## ################################################################################ Convert argument to datetime. Parameters ---------- arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like The object to convert to a datetime. If the DataFrame is provided, the method expects minimally the following columns ( "year", "month", "day") in the DataFrame. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaT. - If 'ignore', then invalid parsing will return the input. dayfirst : bool, default False Specify a date parse order if `arg` is str or its list-likes. If True, parses dates with the day first, eg 10/11/12 is parsed as 2012-11-10. .. 
warning:: dayfirst=True is not strict, but will prefer to parse with day first. If a delimited date string cannot be parsed in accordance with the given `dayfirst` option, e.g. ``to_datetime(['31-12-2021'])``, then a warning will be shown. yearfirst : bool, default False Specify a date parse order if `arg` is str or its list-likes. - If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12. - If both dayfirst and yearfirst are True, yearfirst is preceded (same as dateutil). .. warning:: yearfirst=True is not strict, but will prefer to parse with year first. utc : bool, default None Return UTC DatetimeIndex if True (converting any tz-aware datetime.datetime objects as well). format : str, default None The strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse all the way up to nanoseconds. See strftime documentation for more information on choices: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior. exact : bool, True by default Behaves as: - If True, require an exact format match. - If False, allow the format to match anywhere in the target string. unit : str, default 'ns' The unit of the arg (D,s,ms,us,ns) denote the unit, which is an integer or float number. This will be based off the origin. Example, with unit='ms' and origin='unix' (the default), this would calculate the number of milliseconds to the unix epoch start. infer_datetime_format : bool, default False If True and no `format` is given, attempt to infer the format of the datetime strings based on the first non-NaN element, and if it can be inferred, switch to a faster method of parsing them. In some cases this can increase the parsing speed by ~5-10x. origin : scalar, default 'unix' Define the reference date. The numeric values would be parsed as number of units (defined by `unit`) since this reference date. - If 'unix' (or POSIX) time; origin is set to 1970-01-01. 
- If 'julian', unit must be 'D', and origin is set to beginning of Julian Calendar. Julian day number 0 is assigned to the day starting at noon on January 1, 4713 BC. - If Timestamp convertible, origin is set to Timestamp identified by origin. cache : bool, default True If True, use a cache of unique, converted dates to apply the datetime conversion. May produce significant speed-up when parsing duplicate date strings, especially ones with timezone offsets. The cache is only used when there are at least 50 values. The presence of out-of-bounds values will render the cache unusable and may slow down parsing. .. versionchanged:: 0.25.0 - changed default value from False to True. Returns ------- datetime If parsing succeeded. Return type depends on input: - list-like: - DatetimeIndex, if timezone naive or aware with the same timezone - Index of object dtype, if timezone aware with mixed time offsets - Series: Series of datetime64 dtype - DataFrame: Series of datetime64 dtype - scalar: Timestamp In case when it is not possible to return designated types (e.g. when any element of input is before Timestamp.min or after Timestamp.max) return will have datetime.datetime type (or corresponding array/Series). See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_timedelta : Convert argument to timedelta. convert_dtypes : Convert dtypes. Examples -------- Assembling a datetime from multiple columns of a DataFrame. The keys can be common abbreviations like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns']) or plurals of the same >>> df = pd.DataFrame({'year': [2015, 2016], ... 'month': [2, 3], ... 'day': [4, 5]}) >>> pd.to_datetime(df) 0 2015-02-04 1 2016-03-05 dtype: datetime64[ns] If a date does not meet the `timestamp limitations <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html #timeseries-timestamp-limits>`_, passing errors='ignore' will return the original input instead of raising any exception. 
Passing errors='coerce' will force an out-of-bounds date to NaT, in addition to forcing non-dates (or non-parseable dates) to NaT. >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore') datetime.datetime(1300, 1, 1, 0, 0) >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce') NaT Passing infer_datetime_format=True can often-times speedup a parsing if its not an ISO8601 format exactly, but in a regular format. >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000) >>> s.head() 0 3/11/2000 1 3/12/2000 2 3/13/2000 3 3/11/2000 4 3/12/2000 dtype: object >>> %timeit pd.to_datetime(s, infer_datetime_format=True) # doctest: +SKIP 100 loops, best of 3: 10.4 ms per loop >>> %timeit pd.to_datetime(s, infer_datetime_format=False) # doctest: +SKIP 1 loop, best of 3: 471 ms per loop Using a unix epoch time >>> pd.to_datetime(1490195805, unit='s') Timestamp('2017-03-22 15:16:45') >>> pd.to_datetime(1490195805433502912, unit='ns') Timestamp('2017-03-22 15:16:45.433502912') .. warning:: For float arg, precision rounding might happen. To prevent unexpected behavior use a fixed-width exact type. Using a non-unix epoch origin >>> pd.to_datetime([1, 2, 3], unit='D', ... origin=pd.Timestamp('1960-01-01')) DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], dtype='datetime64[ns]', freq=None) In case input is list-like and the elements of input are of mixed timezones, return will have object type Index if utc=False. >>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500']) Index([2018-10-26 12:00:00-05:30, 2018-10-26 12:00:00-05:00], dtype='object') >>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500'], ... 
utc=True) DatetimeIndex(['2018-10-26 17:30:00+00:00', '2018-10-26 17:00:00+00:00'], dtype='datetime64[ns, UTC]', freq=None) ################################################################################ ################################## Validation ################################## ################################################################################ 2 Errors found: No extended summary found flake8 error: E999 SyntaxError: invalid syntax ``` There are still some errors from master branch.
https://api.github.com/repos/pandas-dev/pandas/pulls/44007
2021-10-13T04:18:04Z
2021-10-17T20:55:07Z
2021-10-17T20:55:07Z
2021-10-18T15:57:24Z
CLN: pragma no cover, post-array_ufunc
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 82f9280870d59..4570957c01016 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -1428,13 +1428,13 @@ def diff_2d( # see https://github.com/cython/cython/issues/2646 if (out_t is float32_t and not (diff_t is float32_t or diff_t is int8_t or diff_t is int16_t)): - raise NotImplementedError + raise NotImplementedError # pragma: no cover elif (out_t is float64_t and (diff_t is float32_t or diff_t is int8_t or diff_t is int16_t)): - raise NotImplementedError + raise NotImplementedError # pragma: no cover elif out_t is int64_t and diff_t is not int64_t: # We only have out_t of int64_t if we have datetimelike - raise NotImplementedError + raise NotImplementedError # pragma: no cover else: # We put this inside an indented else block to avoid cython build # warnings about unreachable code diff --git a/pandas/_libs/arrays.pyx b/pandas/_libs/arrays.pyx index a2d4cf3000ee1..dc91e9bf755ff 100644 --- a/pandas/_libs/arrays.pyx +++ b/pandas/_libs/arrays.pyx @@ -84,7 +84,7 @@ cdef class NDArrayBacked: elif "_ndarray" in state: data = state.pop("_ndarray") else: - raise ValueError + raise ValueError # pragma: no cover self._ndarray = data self._dtype = state.pop("_dtype") @@ -95,7 +95,7 @@ cdef class NDArrayBacked: if len(state) == 1 and isinstance(state[0], dict): self.__setstate__(state[0]) return - raise NotImplementedError(state) + raise NotImplementedError(state) # pragma: no cover data, dtype = state[:2] if isinstance(dtype, np.ndarray): @@ -107,9 +107,9 @@ cdef class NDArrayBacked: for key, val in state[2].items(): setattr(self, key, val) else: - raise NotImplementedError(state) + raise NotImplementedError(state) # pragma: no cover else: - raise NotImplementedError(state) + raise NotImplementedError(state) # pragma: no cover def __len__(self) -> int: return len(self._ndarray) diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index ea0bebea8299b..43fe9f1d091c8 100644 --- 
a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -264,7 +264,7 @@ cdef class IndexEngine: return algos.is_monotonic(values, timelike=False) cdef _make_hash_table(self, Py_ssize_t n): - raise NotImplementedError + raise NotImplementedError # pragma: no cover cdef _check_type(self, object val): hash(val) @@ -607,7 +607,7 @@ cdef class BaseMultiIndexCodesEngine: self._base.__init__(self, lab_ints) def _codes_to_ints(self, ndarray[uint64_t] codes) -> np.ndarray: - raise NotImplementedError("Implemented by subclass") + raise NotImplementedError("Implemented by subclass") # pragma: no cover def _extract_level_codes(self, target) -> np.ndarray: """ diff --git a/pandas/_libs/indexing.pyx b/pandas/_libs/indexing.pyx index bdbaa05138072..181de174c53fb 100644 --- a/pandas/_libs/indexing.pyx +++ b/pandas/_libs/indexing.pyx @@ -19,7 +19,7 @@ cdef class NDFrameIndexerBase: if ndim is None: ndim = self._ndim = self.obj.ndim if ndim > 2: - raise ValueError( + raise ValueError( # pragma: no cover "NDFrameIndexer does not support NDFrame objects with ndim > 2" ) return ndim diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 2f0bcefefaaa1..9fa84a0135a5e 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -338,7 +338,7 @@ cpdef Py_ssize_t slice_len(slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX) except - Py_ssize_t start, stop, step, length if slc is None: - raise TypeError("slc must be slice") + raise TypeError("slc must be slice") # pragma: no cover PySlice_GetIndicesEx(slc, objlen, &start, &stop, &step, &length) @@ -358,7 +358,7 @@ cdef (Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t) slice_get_indices_ex( Py_ssize_t start, stop, step, length if slc is None: - raise TypeError("slc should be a slice") + raise TypeError("slc should be a slice") # pragma: no cover PySlice_GetIndicesEx(slc, objlen, &start, &stop, &step, &length) diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx index 
56be8bbfdcad2..ea5454572ca7e 100644 --- a/pandas/_libs/tslibs/dtypes.pyx +++ b/pandas/_libs/tslibs/dtypes.pyx @@ -205,7 +205,7 @@ class Resolution(Enum): elif self == Resolution.RESO_YR: return FreqGroup.FR_ANN else: - raise ValueError(self) + raise ValueError(self) # pragma: no cover @property def attrname(self) -> str: diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index ea714ce0162bc..5557882e7e9b9 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -574,7 +574,7 @@ cdef class BaseOffset: When the specific offset subclass does not have a vectorized implementation. """ - raise NotImplementedError( + raise NotImplementedError( # pragma: no cover f"DateOffset subclass {type(self).__name__} " "does not have a vectorized implementation" ) diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index 6d350cfa2c1d6..7de071cc6e9c3 100644 --- a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -365,6 +365,9 @@ def wrapper( ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64 ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64 + ), } _take_2d_axis0_dict = { @@ -394,6 +397,9 @@ def wrapper( ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64 ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), } _take_2d_axis1_dict = { @@ -423,6 +429,9 @@ def wrapper( ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64 ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), } _take_2d_multi_dict = { @@ -452,6 +461,9 @@ def wrapper( 
("datetime64[ns]", "datetime64[ns]"): _view_wrapper( libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64 ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), } diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b235f120d98c8..d53e965fc7d1c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2057,54 +2057,12 @@ def empty(self) -> bool_t: def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: return np.asarray(self._values, dtype=dtype) - def __array_wrap__( - self, - result: np.ndarray, - context: tuple[Callable, tuple[Any, ...], int] | None = None, - ): - """ - Gets called after a ufunc and other functions. - - Parameters - ---------- - result: np.ndarray - The result of the ufunc or other function called on the NumPy array - returned by __array__ - context: tuple of (func, tuple, int) - This parameter is returned by ufuncs as a 3-element tuple: (name of the - ufunc, arguments of the ufunc, domain of the ufunc), but is not set by - other numpy functions.q - - Notes - ----- - Series implements __array_ufunc_ so this not called for ufunc on Series. - """ - res = lib.item_from_zerodim(result) - if is_scalar(res): - # e.g. 
we get here with np.ptp(series) - # ptp also requires the item_from_zerodim - return res - d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) - # error: Argument 1 to "NDFrame" has incompatible type "ndarray"; - # expected "BlockManager" - return self._constructor(res, **d).__finalize__( # type: ignore[arg-type] - self, method="__array_wrap__" - ) - @final def __array_ufunc__( self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any ): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) - # ideally we would define this to avoid the getattr checks, but - # is slower - # @property - # def __array_interface__(self): - # """ provide numpy array interface method """ - # values = self.values - # return dict(typestr=values.dtype.str,shape=values.shape,data=values) - # ---------------------------------------------------------------------- # Picklability diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f6bb8e7af3558..d6408ce7bdd28 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -867,7 +867,7 @@ def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): def __array_wrap__(self, result, context=None): """ - Gets called after a ufunc and other functions. + Gets called after a ufunc and other functions e.g. np.split. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: @@ -6166,14 +6166,10 @@ def get_slice_bound( raise err if isinstance(slc, np.ndarray): - # get_loc may return a boolean array or an array of indices, which + # get_loc may return a boolean array, which # is OK as long as they are representable by a slice. 
- if is_bool_dtype(slc): - slc = lib.maybe_booleans_to_slice(slc.view("u1")) - else: - slc = lib.maybe_indices_to_slice( - slc.astype(np.intp, copy=False), len(self) - ) + assert is_bool_dtype(slc.dtype) + slc = lib.maybe_booleans_to_slice(slc.view("u1")) if isinstance(slc, np.ndarray): raise KeyError( f"Cannot get {side} slice bound for non-unique "
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44006
2021-10-13T04:01:56Z
2021-10-15T13:52:08Z
2021-10-15T13:52:08Z
2022-01-18T16:00:19Z
TST: Adding test to test_numeric, #37348
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 2e4551a449267..9932adccdbaf2 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -1429,3 +1429,16 @@ def test_sub_multiindex_swapped_levels(): result = df - df2 expected = pd.DataFrame([0.0] * 6, columns=["a"], index=df.index) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("power", [1, 2, 5]) +@pytest.mark.parametrize("string_size", [0, 1, 2, 5]) +def test_empty_str_comparison(power, string_size): + # GH 37348 + a = np.array(range(10 ** power)) + right = pd.DataFrame(a, dtype=np.int64) + left = " " * string_size + + result = right == left + expected = pd.DataFrame(np.zeros(right.shape, dtype=bool)) + tm.assert_frame_equal(result, expected)
- [x] closes #37348 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them This makes sure that comparing int to empty strings doesn't return an error.
https://api.github.com/repos/pandas-dev/pandas/pulls/44005
2021-10-13T01:12:19Z
2021-10-16T14:46:41Z
2021-10-16T14:46:40Z
2021-10-16T14:46:44Z
REF: de-duplicate MaskedArray tests
diff --git a/pandas/tests/arrays/boolean/test_comparison.py b/pandas/tests/arrays/boolean/test_comparison.py index 726b78fbd43bd..8730837b518e5 100644 --- a/pandas/tests/arrays/boolean/test_comparison.py +++ b/pandas/tests/arrays/boolean/test_comparison.py @@ -4,7 +4,7 @@ import pandas as pd import pandas._testing as tm from pandas.arrays import BooleanArray -from pandas.tests.extension.base import BaseOpsUtil +from pandas.tests.arrays.masked_shared import ComparisonOps @pytest.fixture @@ -15,30 +15,12 @@ def data(): ) -class TestComparisonOps(BaseOpsUtil): - def _compare_other(self, data, op_name, other): - op = self.get_op_from_name(op_name) - - # array - result = pd.Series(op(data, other)) - expected = pd.Series(op(data._data, other), dtype="boolean") - # propagate NAs - expected[data._mask] = pd.NA - - tm.assert_series_equal(result, expected) - - # series - s = pd.Series(data) - result = op(s, other) - - expected = pd.Series(data._data) - expected = op(expected, other) - expected = expected.astype("boolean") - # propagate NAs - expected[data._mask] = pd.NA +@pytest.fixture +def dtype(): + return pd.BooleanDtype() - tm.assert_series_equal(result, expected) +class TestComparisonOps(ComparisonOps): def test_compare_scalar(self, data, all_compare_operators): op_name = all_compare_operators self._compare_other(data, op_name, True) @@ -53,24 +35,8 @@ def test_compare_array(self, data, all_compare_operators): self._compare_other(data, op_name, other) @pytest.mark.parametrize("other", [True, False, pd.NA]) - def test_scalar(self, other, all_compare_operators): - op = self.get_op_from_name(all_compare_operators) - a = pd.array([True, False, None], dtype="boolean") - - result = op(a, other) - - if other is pd.NA: - expected = pd.array([None, None, None], dtype="boolean") - else: - values = op(a._data, other) - expected = BooleanArray(values, a._mask, copy=True) - tm.assert_extension_array_equal(result, expected) - - # ensure we haven't mutated anything inplace - 
result[0] = None - tm.assert_extension_array_equal( - a, pd.array([True, False, None], dtype="boolean") - ) + def test_scalar(self, other, all_compare_operators, dtype): + ComparisonOps.test_scalar(self, other, all_compare_operators, dtype) def test_array(self, all_compare_operators): op = self.get_op_from_name(all_compare_operators) diff --git a/pandas/tests/arrays/floating/test_comparison.py b/pandas/tests/arrays/floating/test_comparison.py index 5538367f49e5b..bfd54e125159c 100644 --- a/pandas/tests/arrays/floating/test_comparison.py +++ b/pandas/tests/arrays/floating/test_comparison.py @@ -1,86 +1,17 @@ -import numpy as np import pytest import pandas as pd import pandas._testing as tm -from pandas.tests.extension.base import BaseOpsUtil +from pandas.tests.arrays.masked_shared import ( + ComparisonOps, + NumericOps, +) -class TestComparisonOps(BaseOpsUtil): - def _compare_other(self, data, op_name, other): - op = self.get_op_from_name(op_name) - - # array - result = pd.Series(op(data, other)) - expected = pd.Series(op(data._data, other), dtype="boolean") - - # fill the nan locations - expected[data._mask] = pd.NA - - tm.assert_series_equal(result, expected) - - # series - s = pd.Series(data) - result = op(s, other) - - expected = op(pd.Series(data._data), other) - - # fill the nan locations - expected[data._mask] = pd.NA - expected = expected.astype("boolean") - - tm.assert_series_equal(result, expected) - +class TestComparisonOps(NumericOps, ComparisonOps): @pytest.mark.parametrize("other", [True, False, pd.NA, -1.0, 0.0, 1]) - def test_scalar(self, other, all_compare_operators): - op = self.get_op_from_name(all_compare_operators) - a = pd.array([1.0, 0.0, None], dtype="Float64") - - result = op(a, other) - - if other is pd.NA: - expected = pd.array([None, None, None], dtype="boolean") - else: - values = op(a._data, other) - expected = pd.arrays.BooleanArray(values, a._mask, copy=True) - tm.assert_extension_array_equal(result, expected) - - # ensure we haven't 
mutated anything inplace - result[0] = pd.NA - tm.assert_extension_array_equal(a, pd.array([1.0, 0.0, None], dtype="Float64")) - - def test_array(self, all_compare_operators): - op = self.get_op_from_name(all_compare_operators) - a = pd.array([0, 1, 2, None, None, None], dtype="Float64") - b = pd.array([0, 1, None, 0, 1, None], dtype="Float64") - - result = op(a, b) - values = op(a._data, b._data) - mask = a._mask | b._mask - - expected = pd.arrays.BooleanArray(values, mask) - tm.assert_extension_array_equal(result, expected) - - # ensure we haven't mutated anything inplace - result[0] = pd.NA - tm.assert_extension_array_equal( - a, pd.array([0, 1, 2, None, None, None], dtype="Float64") - ) - tm.assert_extension_array_equal( - b, pd.array([0, 1, None, 0, 1, None], dtype="Float64") - ) - - def test_compare_with_booleanarray(self, all_compare_operators): - op = self.get_op_from_name(all_compare_operators) - a = pd.array([True, False, None] * 3, dtype="boolean") - b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Float64") - other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean") - expected = op(a, other) - result = op(a, b) - tm.assert_extension_array_equal(result, expected) - expected = op(other, a) - result = op(b, a) - tm.assert_extension_array_equal(result, expected) + def test_scalar(self, other, all_compare_operators, dtype): + ComparisonOps.test_scalar(self, other, all_compare_operators, dtype) def test_compare_with_integerarray(self, all_compare_operators): op = self.get_op_from_name(all_compare_operators) @@ -94,18 +25,6 @@ def test_compare_with_integerarray(self, all_compare_operators): result = op(b, a) tm.assert_extension_array_equal(result, expected) - def test_no_shared_mask(self, data): - result = data + 1 - assert np.shares_memory(result._mask, data._mask) is False - - def test_compare_to_string(self, dtype): - # GH 28930 - s = pd.Series([1, None], dtype=dtype) - result = s == "a" - expected = pd.Series([False, pd.NA], 
dtype="boolean") - - self.assert_series_equal(result, expected) - def test_equals(): # GH-30652 diff --git a/pandas/tests/arrays/integer/test_comparison.py b/pandas/tests/arrays/integer/test_comparison.py index 2f12ffacfb419..043f5d64d159b 100644 --- a/pandas/tests/arrays/integer/test_comparison.py +++ b/pandas/tests/arrays/integer/test_comparison.py @@ -1,99 +1,20 @@ -import numpy as np import pytest import pandas as pd -import pandas._testing as tm -from pandas.tests.extension.base import BaseOpsUtil +from pandas.tests.arrays.masked_shared import ( + ComparisonOps, + NumericOps, +) -class TestComparisonOps(BaseOpsUtil): - def _compare_other(self, data, op_name, other): - op = self.get_op_from_name(op_name) - - # array - result = pd.Series(op(data, other)) - expected = pd.Series(op(data._data, other), dtype="boolean") - - # fill the nan locations - expected[data._mask] = pd.NA - - tm.assert_series_equal(result, expected) - - # series - s = pd.Series(data) - result = op(s, other) - - expected = op(pd.Series(data._data), other) - - # fill the nan locations - expected[data._mask] = pd.NA - expected = expected.astype("boolean") - - tm.assert_series_equal(result, expected) - +class TestComparisonOps(NumericOps, ComparisonOps): @pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1]) - def test_scalar(self, other, all_compare_operators): - op = self.get_op_from_name(all_compare_operators) - a = pd.array([1, 0, None], dtype="Int64") - - result = op(a, other) - - if other is pd.NA: - expected = pd.array([None, None, None], dtype="boolean") - else: - values = op(a._data, other) - expected = pd.arrays.BooleanArray(values, a._mask, copy=True) - tm.assert_extension_array_equal(result, expected) - - # ensure we haven't mutated anything inplace - result[0] = pd.NA - tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64")) - - def test_array(self, all_compare_operators): - op = self.get_op_from_name(all_compare_operators) - a = pd.array([0, 1, 2, None, 
None, None], dtype="Int64") - b = pd.array([0, 1, None, 0, 1, None], dtype="Int64") - - result = op(a, b) - values = op(a._data, b._data) - mask = a._mask | b._mask - - expected = pd.arrays.BooleanArray(values, mask) - tm.assert_extension_array_equal(result, expected) - - # ensure we haven't mutated anything inplace - result[0] = pd.NA - tm.assert_extension_array_equal( - a, pd.array([0, 1, 2, None, None, None], dtype="Int64") - ) - tm.assert_extension_array_equal( - b, pd.array([0, 1, None, 0, 1, None], dtype="Int64") - ) - - def test_compare_with_booleanarray(self, all_compare_operators): - op = self.get_op_from_name(all_compare_operators) - a = pd.array([True, False, None] * 3, dtype="boolean") - b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64") - other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean") - expected = op(a, other) - result = op(a, b) - tm.assert_extension_array_equal(result, expected) - - def test_no_shared_mask(self, data): - result = data + 1 - assert np.shares_memory(result._mask, data._mask) is False - - def test_compare_to_string(self, any_int_ea_dtype): - # GH 28930 - s = pd.Series([1, None], dtype=any_int_ea_dtype) - result = s == "a" - expected = pd.Series([False, pd.NA], dtype="boolean") - - self.assert_series_equal(result, expected) + def test_scalar(self, other, all_compare_operators, dtype): + ComparisonOps.test_scalar(self, other, all_compare_operators, dtype) - def test_compare_to_int(self, any_int_ea_dtype, all_compare_operators): + def test_compare_to_int(self, dtype, all_compare_operators): # GH 28930 - s1 = pd.Series([1, None, 3], dtype=any_int_ea_dtype) + s1 = pd.Series([1, None, 3], dtype=dtype) s2 = pd.Series([1, None, 3], dtype="float") method = getattr(s1, all_compare_operators) diff --git a/pandas/tests/arrays/masked_shared.py b/pandas/tests/arrays/masked_shared.py new file mode 100644 index 0000000000000..1a461777e08e3 --- /dev/null +++ b/pandas/tests/arrays/masked_shared.py @@ -0,0 +1,106 @@ +""" 
+Tests shared by MaskedArray subclasses. +""" +import numpy as np + +import pandas as pd +import pandas._testing as tm +from pandas.tests.extension.base import BaseOpsUtil + + +class ComparisonOps(BaseOpsUtil): + def _compare_other(self, data, op_name, other): + op = self.get_op_from_name(op_name) + + # array + result = pd.Series(op(data, other)) + expected = pd.Series(op(data._data, other), dtype="boolean") + + # fill the nan locations + expected[data._mask] = pd.NA + + tm.assert_series_equal(result, expected) + + # series + ser = pd.Series(data) + result = op(ser, other) + + expected = op(pd.Series(data._data), other) + + # fill the nan locations + expected[data._mask] = pd.NA + expected = expected.astype("boolean") + + tm.assert_series_equal(result, expected) + + # subclass will override to parametrize 'other' + def test_scalar(self, other, all_compare_operators, dtype): + op = self.get_op_from_name(all_compare_operators) + left = pd.array([1, 0, None], dtype=dtype) + + result = op(left, other) + + if other is pd.NA: + expected = pd.array([None, None, None], dtype="boolean") + else: + values = op(left._data, other) + expected = pd.arrays.BooleanArray(values, left._mask, copy=True) + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + result[0] = pd.NA + tm.assert_extension_array_equal(left, pd.array([1, 0, None], dtype=dtype)) + + +class NumericOps: + # Shared by IntegerArray and FloatingArray, not BooleanArray + + def test_no_shared_mask(self, data): + result = data + 1 + assert np.shares_memory(result._mask, data._mask) is False + + def test_array(self, all_compare_operators, dtype): + op = self.get_op_from_name(all_compare_operators) + + left = pd.array([0, 1, 2, None, None, None], dtype=dtype) + right = pd.array([0, 1, None, 0, 1, None], dtype=dtype) + + result = op(left, right) + values = op(left._data, right._data) + mask = left._mask | right._mask + + expected = pd.arrays.BooleanArray(values, mask) + 
tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + result[0] = pd.NA + tm.assert_extension_array_equal( + left, pd.array([0, 1, 2, None, None, None], dtype=dtype) + ) + tm.assert_extension_array_equal( + right, pd.array([0, 1, None, 0, 1, None], dtype=dtype) + ) + + def test_compare_with_booleanarray(self, all_compare_operators, dtype): + op = self.get_op_from_name(all_compare_operators) + + left = pd.array([True, False, None] * 3, dtype="boolean") + right = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype=dtype) + other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean") + + expected = op(left, other) + result = op(left, right) + tm.assert_extension_array_equal(result, expected) + + # reversed op + expected = op(other, left) + result = op(right, left) + tm.assert_extension_array_equal(result, expected) + + def test_compare_to_string(self, dtype): + # GH#28930 + ser = pd.Series([1, None], dtype=dtype) + result = ser == "a" + expected = pd.Series([False, pd.NA], dtype="boolean") + + self.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index e9ceec3a3d7e6..88437321b1028 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -10,13 +10,13 @@ class BaseOpsUtil(BaseExtensionTests): - def get_op_from_name(self, op_name): + def get_op_from_name(self, op_name: str): return tm.get_op_from_name(op_name) - def check_opname(self, s, op_name, other, exc=Exception): + def check_opname(self, ser: pd.Series, op_name: str, other, exc=Exception): op = self.get_op_from_name(op_name) - self._check_op(s, op, other, op_name, exc) + self._check_op(ser, op, other, op_name, exc) def _combine(self, obj, other, op): if isinstance(obj, pd.DataFrame): @@ -27,29 +27,31 @@ def _combine(self, obj, other, op): expected = obj.combine(other, op) return expected - def _check_op(self, s, op, other, op_name, 
exc=NotImplementedError): + def _check_op( + self, ser: pd.Series, op, other, op_name: str, exc=NotImplementedError + ): if exc is None: - result = op(s, other) - expected = self._combine(s, other, op) - assert isinstance(result, type(s)) + result = op(ser, other) + expected = self._combine(ser, other, op) + assert isinstance(result, type(ser)) self.assert_equal(result, expected) else: with pytest.raises(exc): - op(s, other) + op(ser, other) - def _check_divmod_op(self, s, op, other, exc=Exception): + def _check_divmod_op(self, ser: pd.Series, op, other, exc=Exception): # divmod has multiple return values, so check separately if exc is None: - result_div, result_mod = op(s, other) + result_div, result_mod = op(ser, other) if op is divmod: - expected_div, expected_mod = s // other, s % other + expected_div, expected_mod = ser // other, ser % other else: - expected_div, expected_mod = other // s, other % s + expected_div, expected_mod = other // ser, other % ser self.assert_series_equal(result_div, expected_div) self.assert_series_equal(result_mod, expected_mod) else: with pytest.raises(exc): - divmod(s, other) + divmod(ser, other) class BaseArithmeticOpsTests(BaseOpsUtil): @@ -73,8 +75,8 @@ class BaseArithmeticOpsTests(BaseOpsUtil): def test_arith_series_with_scalar(self, data, all_arithmetic_operators): # series & scalar op_name = all_arithmetic_operators - s = pd.Series(data) - self.check_opname(s, op_name, s.iloc[0], exc=self.series_scalar_exc) + ser = pd.Series(data) + self.check_opname(ser, op_name, ser.iloc[0], exc=self.series_scalar_exc) def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): # frame & scalar @@ -85,29 +87,29 @@ def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): def test_arith_series_with_array(self, data, all_arithmetic_operators): # ndarray & other series op_name = all_arithmetic_operators - s = pd.Series(data) + ser = pd.Series(data) self.check_opname( - s, op_name, pd.Series([s.iloc[0]] * len(s)), 
exc=self.series_array_exc + ser, op_name, pd.Series([ser.iloc[0]] * len(ser)), exc=self.series_array_exc ) def test_divmod(self, data): - s = pd.Series(data) - self._check_divmod_op(s, divmod, 1, exc=self.divmod_exc) - self._check_divmod_op(1, ops.rdivmod, s, exc=self.divmod_exc) + ser = pd.Series(data) + self._check_divmod_op(ser, divmod, 1, exc=self.divmod_exc) + self._check_divmod_op(1, ops.rdivmod, ser, exc=self.divmod_exc) def test_divmod_series_array(self, data, data_for_twos): - s = pd.Series(data) - self._check_divmod_op(s, divmod, data) + ser = pd.Series(data) + self._check_divmod_op(ser, divmod, data) other = data_for_twos - self._check_divmod_op(other, ops.rdivmod, s) + self._check_divmod_op(other, ops.rdivmod, ser) other = pd.Series(other) - self._check_divmod_op(other, ops.rdivmod, s) + self._check_divmod_op(other, ops.rdivmod, ser) def test_add_series_with_extension_array(self, data): - s = pd.Series(data) - result = s + data + ser = pd.Series(data) + result = ser + data expected = pd.Series(data + data) self.assert_series_equal(result, expected) @@ -128,35 +130,40 @@ def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): class BaseComparisonOpsTests(BaseOpsUtil): """Various Series and DataFrame comparison ops methods.""" - def _compare_other(self, s, data, op_name, other): + def _compare_other(self, ser: pd.Series, data, op_name: str, other): op = self.get_op_from_name(op_name) if op_name in ["__eq__", "__ne__"]: # comparison should match point-wise comparisons - result = op(s, other) - expected = s.combine(other, op) + result = op(ser, other) + expected = ser.combine(other, op) self.assert_series_equal(result, expected) else: - - # array - assert getattr(data, op_name)(other) is NotImplemented - - # series - s = pd.Series(data) - with pytest.raises(TypeError): - op(s, other) + exc = None + try: + result = op(ser, other) + except Exception as err: + exc = err + + if exc is None: + # Didn't error, then should match pointwise 
behavior + expected = ser.combine(other, op) + self.assert_series_equal(result, expected) + else: + with pytest.raises(type(exc)): + ser.combine(other, op) def test_compare_scalar(self, data, all_compare_operators): op_name = all_compare_operators - s = pd.Series(data) - self._compare_other(s, data, op_name, 0) + ser = pd.Series(data) + self._compare_other(ser, data, op_name, 0) def test_compare_array(self, data, all_compare_operators): op_name = all_compare_operators - s = pd.Series(data) + ser = pd.Series(data) other = pd.Series([data[0]] * len(data)) - self._compare_other(s, data, op_name, other) + self._compare_other(ser, data, op_name, other) @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame]) def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): @@ -181,8 +188,8 @@ def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): class BaseUnaryOpsTests(BaseOpsUtil): def test_invert(self, data): - s = pd.Series(data, name="name") - result = ~s + ser = pd.Series(data, name="name") + result = ~ser expected = pd.Series(~data, name="name") self.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 99d92a5bbf774..e9b4ceafddfd5 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -314,12 +314,6 @@ def _check_divmod_op(self, s, op, other, exc=NotImplementedError): class TestComparisonOps(BaseDecimal, base.BaseComparisonOpsTests): - def check_opname(self, s, op_name, other, exc=None): - super().check_opname(s, op_name, other, exc=None) - - def _compare_other(self, s, data, op_name, other): - self.check_opname(s, op_name, other) - def test_compare_scalar(self, data, all_compare_operators): op_name = all_compare_operators s = pd.Series(data) diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py index 2ee26e139f3a6..9260c342caa6b 
100644 --- a/pandas/tests/extension/test_boolean.py +++ b/pandas/tests/extension/test_boolean.py @@ -150,9 +150,6 @@ def check_opname(self, s, op_name, other, exc=None): # overwriting to indicate ops don't raise an error super().check_opname(s, op_name, other, exc=None) - def _compare_other(self, s, data, op_name, other): - self.check_opname(s, op_name, other) - @pytest.mark.skip(reason="Tested in tests/arrays/test_boolean.py") def test_compare_scalar(self, data, all_compare_operators): pass diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index 54e31e05e8b0e..de5a6b7a5bb06 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -172,10 +172,7 @@ class TestCasting(BaseDatetimeTests, base.BaseCastingTests): class TestComparisonOps(BaseDatetimeTests, base.BaseComparisonOpsTests): - def _compare_other(self, s, data, op_name, other): - # the base test is not appropriate for us. We raise on comparison - # with (some) integers, depending on the value. - pass + pass class TestMissing(BaseDatetimeTests, base.BaseMissingTests): @@ -187,12 +184,6 @@ class TestReshaping(BaseDatetimeTests, base.BaseReshapingTests): def test_concat(self, data, in_frame): pass - def test_concat_mixed_dtypes(self, data): - # concat(Series[datetimetz], Series[category]) uses a - # plain np.array(values) on the DatetimeArray, which - # drops the tz. - super().test_concat_mixed_dtypes(data) - class TestSetitem(BaseDatetimeTests, base.BaseSetitemTests): pass diff --git a/pandas/tests/extension/test_floating.py b/pandas/tests/extension/test_floating.py index f4d3243b5129f..173bc2d05af2f 100644 --- a/pandas/tests/extension/test_floating.py +++ b/pandas/tests/extension/test_floating.py @@ -128,6 +128,7 @@ def _check_divmod_op(self, s, op, other, exc=None): class TestComparisonOps(base.BaseComparisonOpsTests): + # TODO: share with IntegerArray? 
def _check_op(self, s, op, other, op_name, exc=NotImplementedError): if exc is None: result = op(s, other) diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py index 4c845055b56c4..f210a4ce56091 100644 --- a/pandas/tests/extension/test_period.py +++ b/pandas/tests/extension/test_period.py @@ -154,10 +154,7 @@ class TestCasting(BasePeriodTests, base.BaseCastingTests): class TestComparisonOps(BasePeriodTests, base.BaseComparisonOpsTests): - def _compare_other(self, s, data, op_name, other): - # the base test is not appropriate for us. We raise on comparison - # with (some) integers, depending on the value. - pass + pass class TestMissing(BasePeriodTests, base.BaseMissingTests):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44004
2021-10-12T23:01:21Z
2021-10-13T17:47:54Z
2021-10-13T17:47:54Z
2021-10-13T20:02:33Z
[BUG] Fix DataFrameGroupBy.boxplot with subplots=False fails for object columns
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 026429dabae84..63c18374b9154 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -542,7 +542,7 @@ Period Plotting ^^^^^^^^ -- +- When given non-numeric data, :meth:`DataFrame.boxplot` now raises a ``ValueError`` rather than a cryptic ``KeyError`` or ``ZeroDivsionError``, in line with other plotting functions like :meth:`DataFrame.hist`. (:issue:`43480`) - Groupby/resample/rolling diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index 1308a83f61443..a2089de294e22 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -391,6 +391,11 @@ def plot_group(keys, values, ax: Axes): with plt.rc_context(rc): ax = plt.gca() data = data._get_numeric_data() + naxes = len(data.columns) + if naxes == 0: + raise ValueError( + "boxplot method requires numerical columns, nothing to plot." + ) if columns is None: columns = data.columns else: diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index dbceeae44a493..ce32e5801e461 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -543,6 +543,14 @@ def test_groupby_boxplot_subplots_false(self, col, expected_xticklabel): result_xticklabel = [x.get_text() for x in axes.get_xticklabels()] assert expected_xticklabel == result_xticklabel + def test_groupby_boxplot_object(self): + # GH 43480 + df = self.hist_df.astype("object") + grouped = df.groupby("gender") + msg = "boxplot method requires numerical columns, nothing to plot" + with pytest.raises(ValueError, match=msg): + _check_plot_works(grouped.boxplot, subplots=False) + def test_boxplot_multiindex_column(self): # GH 16748 arrays = [
- [x] closes #43480 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44003
2021-10-12T22:25:43Z
2021-11-14T03:08:55Z
2021-11-14T03:08:55Z
2021-11-14T03:08:59Z
BUG: Fix for rolling with uneven nanosecond windows have wrong results
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index c5a22d8766a96..e47eaa94eb412 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -506,6 +506,7 @@ Groupby/resample/rolling - Bug in :meth:`GroupBy.apply` with time-based :class:`Grouper` objects incorrectly raising ``ValueError`` in corner cases where the grouping vector contains a ``NaT`` (:issue:`43500`, :issue:`43515`) - Bug in :meth:`GroupBy.mean` failing with ``complex`` dtype (:issue:`43701`) - Fixed bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` not calculating window bounds correctly for the first row when ``center=True`` and index is decreasing (:issue:`43927`) +- Fixed bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` for centered datetimelike windows with uneven nanosecond (:issue:`43997`) - Bug in :meth:`GroupBy.nth` failing on ``axis=1`` (:issue:`43926`) - Fixed bug in :meth:`Series.rolling` and :meth:`DataFrame.rolling` not respecting right bound on centered datetime-like windows, if the index contain duplicates (:issue:`#3944`) diff --git a/pandas/_libs/window/indexers.pyx b/pandas/_libs/window/indexers.pyx index 59889cb58c3d5..4b3a858ade773 100644 --- a/pandas/_libs/window/indexers.pyx +++ b/pandas/_libs/window/indexers.pyx @@ -62,6 +62,14 @@ def calculate_variable_window_bounds( if closed in ['left', 'both']: left_closed = True + # GH 43997: + # If the forward and the backward facing windows + # would result in a fraction of 1/2 a nanosecond + # we need to make both interval ends inclusive. 
+ if center and window_size % 2 == 1: + right_closed = True + left_closed = True + if index[num_values - 1] < index[0]: index_growth_sign = -1 diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 6028e89bea374..d58eeaa7cbcb1 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -1276,6 +1276,23 @@ def test_rolling_decreasing_indices_centered(window, closed, expected, frame_or_ tm.assert_equal(result_dec, expected_dec) +@pytest.mark.parametrize( + "window,expected", + [ + ("1ns", [1.0, 1.0, 1.0, 1.0]), + ("3ns", [2.0, 3.0, 3.0, 2.0]), + ], +) +def test_rolling_center_nanosecond_resolution( + window, closed, expected, frame_or_series +): + index = date_range("2020", periods=4, freq="1ns") + df = frame_or_series([1, 1, 1, 1], index=index, dtype=float) + expected = frame_or_series(expected, index=index, dtype=float) + result = df.rolling(window, closed=closed, center=True).sum() + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( "method,expected", [
- [x] closes #43997 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/43998
2021-10-12T15:44:50Z
2021-10-15T00:59:21Z
2021-10-15T00:59:21Z
2021-10-15T00:59:28Z
MAINT: Correct small cast issues on Windows
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index e7f889ef39707..ec89e52e2eff7 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -543,7 +543,7 @@ def has_infs(floating[:] arr) -> bool: def maybe_indices_to_slice(ndarray[intp_t, ndim=1] indices, int max_len): cdef: Py_ssize_t i, n = len(indices) - int k, vstart, vlast, v + intp_t k, vstart, vlast, v if n == 0: return slice(0, 0) @@ -553,7 +553,7 @@ def maybe_indices_to_slice(ndarray[intp_t, ndim=1] indices, int max_len): return indices if n == 1: - return slice(vstart, vstart + 1) + return slice(vstart, <intp_t>(vstart + 1)) vlast = indices[n - 1] if vlast < 0 or max_len <= vlast: @@ -569,12 +569,12 @@ def maybe_indices_to_slice(ndarray[intp_t, ndim=1] indices, int max_len): return indices if k > 0: - return slice(vstart, vlast + 1, k) + return slice(vstart, <intp_t>(vlast + 1), k) else: if vlast == 0: return slice(vstart, None, k) else: - return slice(vstart, vlast - 1, k) + return slice(vstart, <intp_t>(vlast - 1), k) @cython.wraparound(False) diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 5fe6818ff4b0e..c9f3e1f01a55c 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -203,7 +203,7 @@ cdef extern from "parser/tokenizer.h": int usecols - int expected_fields + Py_ssize_t expected_fields BadLineHandleMethod on_bad_lines # floating point options @@ -398,7 +398,7 @@ cdef class TextReader: else: if len(delimiter) > 1: raise ValueError('only length-1 separators excluded right now') - self.parser.delimiter = ord(delimiter) + self.parser.delimiter = <char>ord(delimiter) # ---------------------------------------- # parser options @@ -410,21 +410,21 @@ cdef class TextReader: if lineterminator is not None: if len(lineterminator) != 1: raise ValueError('Only length-1 line terminators supported') - self.parser.lineterminator = ord(lineterminator) + self.parser.lineterminator = <char>ord(lineterminator) if len(decimal) != 1: raise ValueError('Only length-1 
decimal markers supported') - self.parser.decimal = ord(decimal) + self.parser.decimal = <char>ord(decimal) if thousands is not None: if len(thousands) != 1: raise ValueError('Only length-1 thousands markers supported') - self.parser.thousands = ord(thousands) + self.parser.thousands = <char>ord(thousands) if escapechar is not None: if len(escapechar) != 1: raise ValueError('Only length-1 escapes supported') - self.parser.escapechar = ord(escapechar) + self.parser.escapechar = <char>ord(escapechar) self._set_quoting(quotechar, quoting) @@ -437,7 +437,7 @@ cdef class TextReader: if comment is not None: if len(comment) > 1: raise ValueError('Only length-1 comment characters supported') - self.parser.commentchar = ord(comment) + self.parser.commentchar = <char>ord(comment) self.parser.on_bad_lines = on_bad_lines @@ -591,7 +591,7 @@ cdef class TextReader: raise TypeError('"quotechar" must be a 1-character string') else: self.parser.quoting = quoting - self.parser.quotechar = ord(quote_char) + self.parser.quotechar = <char>ord(quote_char) cdef _make_skiprow_set(self): if util.is_integer_object(self.skiprows): @@ -1045,8 +1045,8 @@ cdef class TextReader: return results # -> tuple["ArrayLike", int]: - cdef inline _convert_tokens(self, Py_ssize_t i, int start, int end, - object name, bint na_filter, + cdef inline _convert_tokens(self, Py_ssize_t i, int64_t start, + int64_t end, object name, bint na_filter, kh_str_starts_t *na_hashset, object na_flist, object col_dtype): @@ -1537,7 +1537,7 @@ cdef inline int _try_double_nogil(parser_t *parser, float64_t (*double_converter)( const char *, char **, char, char, char, int, int *, int *) nogil, - int col, int line_start, int line_end, + int64_t col, int64_t line_start, int64_t line_end, bint na_filter, kh_str_starts_t *na_hashset, bint use_na_flist, const kh_float64_t *na_flist, diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c index 6785bf628919a..60c6180453c72 100644 --- 
a/pandas/_libs/src/parser/tokenizer.c +++ b/pandas/_libs/src/parser/tokenizer.c @@ -25,7 +25,8 @@ GitHub. See Python Software Foundation License and BSD licenses for these. #include "../headers/portable.h" -void coliter_setup(coliter_t *self, parser_t *parser, int i, int start) { +void coliter_setup(coliter_t *self, parser_t *parser, int64_t i, + int64_t start) { // column i, starting at 0 self->words = parser->words; self->col = i; @@ -411,7 +412,7 @@ static void append_warning(parser_t *self, const char *msg) { static int end_line(parser_t *self) { char *msg; int64_t fields; - int ex_fields = self->expected_fields; + int64_t ex_fields = self->expected_fields; int64_t bufsize = 100; // for error or warning messages fields = self->line_fields[self->lines]; @@ -459,8 +460,8 @@ static int end_line(parser_t *self) { if (self->on_bad_lines == ERROR) { self->error_msg = malloc(bufsize); snprintf(self->error_msg, bufsize, - "Expected %d fields in line %" PRIu64 ", saw %" PRId64 "\n", - ex_fields, self->file_lines, fields); + "Expected %" PRId64 " fields in line %" PRIu64 ", saw %" + PRId64 "\n", ex_fields, self->file_lines, fields); TRACE(("Error at line %d, %d fields\n", self->file_lines, fields)); @@ -471,8 +472,9 @@ static int end_line(parser_t *self) { // pass up error message msg = malloc(bufsize); snprintf(msg, bufsize, - "Skipping line %" PRIu64 ": expected %d fields, saw %" - PRId64 "\n", self->file_lines, ex_fields, fields); + "Skipping line %" PRIu64 ": expected %" PRId64 + " fields, saw %" PRId64 "\n", + self->file_lines, ex_fields, fields); append_warning(self, msg); free(msg); } diff --git a/pandas/_libs/src/parser/tokenizer.h b/pandas/_libs/src/parser/tokenizer.h index 623d3690f252a..d403435cfca9e 100644 --- a/pandas/_libs/src/parser/tokenizer.h +++ b/pandas/_libs/src/parser/tokenizer.h @@ -141,7 +141,7 @@ typedef struct parser_t { int usecols; // Boolean: 1: usecols provided, 0: none provided - int expected_fields; + Py_ssize_t expected_fields; 
BadLineHandleMethod on_bad_lines; // floating point options @@ -175,7 +175,7 @@ typedef struct coliter_t { int64_t col; } coliter_t; -void coliter_setup(coliter_t *self, parser_t *parser, int i, int start); +void coliter_setup(coliter_t *self, parser_t *parser, int64_t i, int64_t start); #define COLITER_NEXT(iter, word) \ do { \ diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index afbb63ecbd2d7..638610c263ccd 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -422,7 +422,8 @@ cdef inline object _parse_dateabbr_string(object date_string, datetime default, cdef: object ret # year initialized to prevent compiler warnings - int year = -1, quarter = -1, month, mnum, date_len + int year = -1, quarter = -1, month, mnum + Py_ssize_t date_len # special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1 assert isinstance(date_string, str)
Use correct types to avoid downcasts on Windows - [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/43995
2021-10-12T09:40:05Z
2021-10-12T22:19:37Z
2021-10-12T22:19:37Z
2021-10-12T22:19:37Z
REF: share more ExtensionIndex methods
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a6360454cc02c..f6bb8e7af3558 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -356,6 +356,7 @@ def _outer_indexer( _typ: str = "index" _data: ExtensionArray | np.ndarray + _data_cls: type[np.ndarray] | type[ExtensionArray] = np.ndarray _id: object | None = None _name: Hashable = None # MultiIndex.levels previously allowed setting the index name. We @@ -640,7 +641,7 @@ def _simple_new(cls: type[_IndexT], values, name: Hashable = None) -> _IndexT: Must be careful not to recurse. """ - assert isinstance(values, np.ndarray), type(values) + assert isinstance(values, cls._data_cls), type(values) result = object.__new__(cls) result._data = values @@ -5020,6 +5021,14 @@ def equals(self, other: Any) -> bool: # d-level MultiIndex can equal d-tuple Index return other.equals(self) + if isinstance(self._values, ExtensionArray): + # Dispatch to the ExtensionArray's .equals method. + if not isinstance(other, type(self)): + return False + + earr = cast(ExtensionArray, self._data) + return earr.equals(other._data) + if is_extension_array_dtype(other.dtype): # All EA-backed Index subclasses override equals return other.equals(self) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 48171bdef24fd..d309dfc21eb95 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -69,6 +69,7 @@ _index_doc_kwargs = dict(ibase._index_doc_kwargs) _T = TypeVar("_T", bound="DatetimeIndexOpsMixin") +_TDT = TypeVar("_TDT", bound="DatetimeTimedeltaMixin") @inherit_names( @@ -529,7 +530,7 @@ def _can_fast_union(self: _T, other: _T) -> bool: # Only need to "adjoin", not overlap return (right_start == left_end + freq) or right_start in left - def _fast_union(self: _T, other: _T, sort=None) -> _T: + def _fast_union(self: _TDT, other: _TDT, sort=None) -> _TDT: # Caller is responsible for ensuring self and other are non-empty # 
to make our life easier, "sort" the two ranges diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index 47ae88a52d919..afab7c8a839e6 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -3,10 +3,7 @@ """ from __future__ import annotations -from typing import ( - Hashable, - TypeVar, -) +from typing import TypeVar import numpy as np @@ -14,7 +11,6 @@ ArrayLike, npt, ) -from pandas.compat.numpy import function as nv from pandas.util._decorators import ( cache_readonly, doc, @@ -27,13 +23,7 @@ ) from pandas.core.dtypes.generic import ABCDataFrame -from pandas.core.arrays import ( - Categorical, - DatetimeArray, - IntervalArray, - PeriodArray, - TimedeltaArray, -) +from pandas.core.arrays import IntervalArray from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.indexers import deprecate_ndim_indexing from pandas.core.indexes.base import Index @@ -148,38 +138,6 @@ class ExtensionIndex(Index): _data: IntervalArray | NDArrayBackedExtensionArray - _data_cls: ( - type[Categorical] - | type[DatetimeArray] - | type[TimedeltaArray] - | type[PeriodArray] - | type[IntervalArray] - ) - - @classmethod - def _simple_new( - cls, - array: IntervalArray | NDArrayBackedExtensionArray, - name: Hashable = None, - ): - """ - Construct from an ExtensionArray of the appropriate type. 
- - Parameters - ---------- - array : ExtensionArray - name : Label, default None - Attached as result.name - """ - assert isinstance(array, cls._data_cls), type(array) - - result = object.__new__(cls) - result._data = array - result._name = name - result._cache = {} - result._reset_identity() - return result - # --------------------------------------------------------------------- # NDarray-Like Methods @@ -198,11 +156,6 @@ def __getitem__(self, key): # --------------------------------------------------------------------- - def repeat(self, repeats, axis=None): - nv.validate_repeat((), {"axis": axis}) - result = self._data.repeat(repeats, axis=axis) - return type(self)._simple_new(result, name=self.name) - def insert(self, loc: int, item) -> Index: """ Make new Index inserting new item at location. Follows @@ -284,17 +237,6 @@ def _isnan(self) -> npt.NDArray[np.bool_]: # "ndarray") return self._data.isna() # type: ignore[return-value] - @doc(Index.equals) - def equals(self, other) -> bool: - # Dispatch to the ExtensionArray's .equals method. - if self.is_(other): - return True - - if not isinstance(other, type(self)): - return False - - return self._data.equals(other._data) - class NDArrayBackedExtensionIndex(ExtensionIndex): """ diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index daca14692ed09..7b0d163c659a5 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2161,7 +2161,7 @@ def repeat(self, repeats: int, axis=None) -> MultiIndex: return MultiIndex( levels=self.levels, codes=[ - level_codes.view(np.ndarray).astype(np.intp).repeat(repeats) + level_codes.view(np.ndarray).astype(np.intp, copy=False).repeat(repeats) for level_codes in self.codes ], names=self.names,
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/43992
2021-10-12T04:54:51Z
2021-10-12T19:31:54Z
2021-10-12T19:31:54Z
2021-10-12T19:44:58Z
CLN: use numpy quantile in qcut
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 77cd73fdfe91b..10a5932731e3b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1117,89 +1117,6 @@ def checked_add_with_arr( return arr + b -def quantile(x, q, interpolation_method="fraction"): - """ - Compute sample quantile or quantiles of the input array. For example, q=0.5 - computes the median. - - The `interpolation_method` parameter supports three values, namely - `fraction` (default), `lower` and `higher`. Interpolation is done only, - if the desired quantile lies between two data points `i` and `j`. For - `fraction`, the result is an interpolated value between `i` and `j`; - for `lower`, the result is `i`, for `higher` the result is `j`. - - Parameters - ---------- - x : ndarray - Values from which to extract score. - q : scalar or array - Percentile at which to extract score. - interpolation_method : {'fraction', 'lower', 'higher'}, optional - This optional parameter specifies the interpolation method to use, - when the desired quantile lies between two data points `i` and `j`: - - - fraction: `i + (j - i)*fraction`, where `fraction` is the - fractional part of the index surrounded by `i` and `j`. - -lower: `i`. - - higher: `j`. - - Returns - ------- - score : float - Score at percentile. - - Examples - -------- - >>> from scipy import stats - >>> a = np.arange(100) - >>> stats.scoreatpercentile(a, 50) - 49.5 - - """ - x = np.asarray(x) - mask = isna(x) - - x = x[~mask] - - values = np.sort(x) - - def _interpolate(a, b, fraction): - """ - Returns the point at the given fraction between a and b, where - 'fraction' must be between 0 and 1. 
- """ - return a + (b - a) * fraction - - def _get_score(at): - if len(values) == 0: - return np.nan - - idx = at * (len(values) - 1) - if idx % 1 == 0: - score = values[int(idx)] - else: - if interpolation_method == "fraction": - score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1) - elif interpolation_method == "lower": - score = values[np.floor(idx)] - elif interpolation_method == "higher": - score = values[np.ceil(idx)] - else: - raise ValueError( - "interpolation_method can only be 'fraction' " - ", 'lower' or 'higher'" - ) - - return score - - if is_scalar(q): - return _get_score(q) - - q = np.asarray(q, np.float64) - result = [_get_score(x) for x in q] - return np.array(result, dtype=np.float64) - - # --------------- # # select n # # --------------- # diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 4ea4c055c12b0..a1b058224795e 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -367,11 +367,12 @@ def qcut( x = _preprocess_for_cut(x) x, dtype = _coerce_to_type(x) - if is_integer(q): - quantiles = np.linspace(0, 1, q + 1) - else: - quantiles = q - bins = algos.quantile(x, quantiles) + quantiles = np.linspace(0, 1, q + 1) if is_integer(q) else q + + x_np = np.asarray(x) + x_np = x_np[~np.isnan(x_np)] + bins = np.quantile(x_np, quantiles) + fac, bins = _bins_to_cuts( x, bins, diff --git a/pandas/tests/reshape/test_qcut.py b/pandas/tests/reshape/test_qcut.py index 6c4d14f1dede3..f7c7204d02a49 100644 --- a/pandas/tests/reshape/test_qcut.py +++ b/pandas/tests/reshape/test_qcut.py @@ -21,7 +21,6 @@ ) import pandas._testing as tm from pandas.api.types import CategoricalDtype as CDT -from pandas.core.algorithms import quantile from pandas.tseries.offsets import ( Day, @@ -34,8 +33,8 @@ def test_qcut(): # We store the bins as Index that have been # rounded to comparisons are a bit tricky. 
- labels, bins = qcut(arr, 4, retbins=True) - ex_bins = quantile(arr, [0, 0.25, 0.5, 0.75, 1.0]) + labels, _ = qcut(arr, 4, retbins=True) + ex_bins = np.quantile(arr, [0, 0.25, 0.5, 0.75, 1.0]) result = labels.categories.left.values assert np.allclose(result, ex_bins[:-1], atol=1e-2) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 4a0d6f2cccc32..74bcb589b008a 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1731,14 +1731,6 @@ def test_hashtable_large_sizehint(self, hashtable): tbl = hashtable(size_hint=size_hint) # noqa -def test_quantile(): - s = Series(np.random.randn(100)) - - result = algos.quantile(s, [0, 0.25, 0.5, 0.75, 1.0]) - expected = algos.quantile(s.values, [0, 0.25, 0.5, 0.75, 1.0]) - tm.assert_almost_equal(result, expected) - - def test_unique_label_indices(): a = np.random.randint(1, 1 << 10, 1 << 15).astype(np.intp)
The quantile function in algos is a partial implementation of the numpy quantile and there are no pandas objects involved.
https://api.github.com/repos/pandas-dev/pandas/pulls/43991
2021-10-12T04:18:47Z
2021-10-12T19:27:24Z
2021-10-12T19:27:24Z
2021-10-12T19:31:13Z
TST: pass exact to assert_index_equal
diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index fc7e36dda4619..c9f7fd43c1050 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -33,6 +33,7 @@ IntervalIndex, MultiIndex, PeriodIndex, + RangeIndex, Series, TimedeltaIndex, ) @@ -552,8 +553,19 @@ def assert_categorical_equal( """ _check_isinstance(left, right, Categorical) + exact: bool | str + if isinstance(left.categories, RangeIndex) or isinstance( + right.categories, RangeIndex + ): + exact = "equiv" + else: + # We still want to require exact matches for NumericIndex + exact = True + if check_category_order: - assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories") + assert_index_equal( + left.categories, right.categories, obj=f"{obj}.categories", exact=exact + ) assert_numpy_array_equal( left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes" ) @@ -564,11 +576,12 @@ def assert_categorical_equal( except TypeError: # e.g. '<' not supported between instances of 'int' and 'str' lc, rc = left.categories, right.categories - assert_index_equal(lc, rc, obj=f"{obj}.categories") + assert_index_equal(lc, rc, obj=f"{obj}.categories", exact=exact) assert_index_equal( left.categories.take(left.codes), right.categories.take(right.codes), obj=f"{obj}.values", + exact=exact, ) assert_attr_equal("ordered", left, right, obj=obj) diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index ec17b9f0bfd52..2e4551a449267 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -1215,12 +1215,12 @@ def check_binop(self, ops, scalars, idxs): b = b._rename("bar") result = op(a, b) expected = op(Int64Index(a), Int64Index(b)) - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact="equiv") for idx in idxs: for scalar in scalars: result = op(idx, scalar) expected = op(Int64Index(idx), scalar) - tm.assert_index_equal(result, 
expected) + tm.assert_index_equal(result, expected, exact="equiv") def test_binops(self): ops = [ diff --git a/pandas/tests/frame/methods/test_sample.py b/pandas/tests/frame/methods/test_sample.py index d5d1f975deefa..be85d6a186fcc 100644 --- a/pandas/tests/frame/methods/test_sample.py +++ b/pandas/tests/frame/methods/test_sample.py @@ -363,5 +363,5 @@ def test_sample_ignore_index(self): {"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10} ) result = df.sample(3, ignore_index=True) - expected_index = Index([0, 1, 2]) - tm.assert_index_equal(result.index, expected_index) + expected_index = Index(range(3)) + tm.assert_index_equal(result.index, expected_index, exact=True) diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 38127a0e255bd..2fdbeb56f8011 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -604,12 +604,15 @@ def test_map(self, simple_index): expected = idx.astype("int64") elif is_float_dtype(idx.dtype): expected = idx.astype("float64") + if idx._is_backward_compat_public_numeric_index: + # We get a NumericIndex back, not Float64Index + expected = type(idx)(expected) else: expected = idx result = idx.map(lambda x: x) # For RangeIndex we convert to Int64Index - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact="equiv") @pytest.mark.parametrize( "mapper", @@ -634,7 +637,7 @@ def test_map_dictlike(self, mapper, simple_index): result = idx.map(identity) # For RangeIndex we convert to Int64Index - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact="equiv") # empty mappable if idx._is_backward_compat_public_numeric_index: @@ -827,16 +830,18 @@ def test_insert_na(self, nulls_fixture, simple_index): if na_val is pd.NaT: expected = Index([index[0], pd.NaT] + list(index[1:]), dtype=object) - elif type(index) is NumericIndex and index.dtype.kind == "f": - # GH#43921 - expected = NumericIndex( - [index[0], np.nan] 
+ list(index[1:]), dtype=index.dtype - ) else: expected = Float64Index([index[0], np.nan] + list(index[1:])) + if index._is_backward_compat_public_numeric_index: + # GH#43921 we preserve NumericIndex + if index.dtype.kind == "f": + expected = NumericIndex(expected, dtype=index.dtype) + else: + expected = NumericIndex(expected) + result = index.insert(1, na_val) - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact=True) def test_arithmetic_explicit_conversions(self): # GH 8608 diff --git a/pandas/tests/indexes/ranges/test_constructors.py b/pandas/tests/indexes/ranges/test_constructors.py index 8fb1d7a210cee..c4f26220f87d1 100644 --- a/pandas/tests/indexes/ranges/test_constructors.py +++ b/pandas/tests/indexes/ranges/test_constructors.py @@ -31,7 +31,7 @@ def test_constructor(self, args, kwargs, start, stop, step, name): assert isinstance(result, RangeIndex) assert result.name is name assert result._range == range(start, stop, step) - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact="equiv") def test_constructor_invalid_args(self): msg = "RangeIndex\\(\\.\\.\\.\\) must be called with integers" @@ -149,7 +149,9 @@ def test_constructor_corner(self): index = RangeIndex(1, 5) assert index.values.dtype == np.int64 with tm.assert_produces_warning(FutureWarning, match="will not infer"): - tm.assert_index_equal(index, Index(arr).astype("int64")) + expected = Index(arr).astype("int64") + + tm.assert_index_equal(index, expected, exact="equiv") # non-int raise Exception with pytest.raises(TypeError, match=r"Wrong type \<class 'str'\>"): diff --git a/pandas/tests/indexes/ranges/test_join.py b/pandas/tests/indexes/ranges/test_join.py index 353605da91f94..ed21996de891b 100644 --- a/pandas/tests/indexes/ranges/test_join.py +++ b/pandas/tests/indexes/ranges/test_join.py @@ -77,7 +77,7 @@ def test_join_inner(self): res, lidx, ridx = index.join(other, how="inner", return_indexers=True) assert isinstance(res, 
RangeIndex) - tm.assert_index_equal(res, eres) + tm.assert_index_equal(res, eres, exact="equiv") tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index e064974f0e006..9732c0faf9efd 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -115,7 +115,7 @@ def test_insert(self): result = idx[1:4] # test 0th element - tm.assert_index_equal(idx[0:4], result.insert(0, idx[0])) + tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]), exact="equiv") # GH 18295 (test missing) expected = Float64Index([0, np.nan, 1, 2, 3, 4]) @@ -132,12 +132,13 @@ def test_delete(self): idx = RangeIndex(5, name="Foo") expected = idx[1:].astype(int) result = idx.delete(0) - tm.assert_index_equal(result, expected) + # TODO: could preserve RangeIndex at the ends + tm.assert_index_equal(result, expected, exact="equiv") assert result.name == expected.name expected = idx[:-1].astype(int) result = idx.delete(-1) - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact="equiv") assert result.name == expected.name msg = "index 5 is out of bounds for axis 0 with size 5" @@ -300,7 +301,7 @@ def test_nbytes(self): # memory savings vs int index idx = RangeIndex(0, 1000) - assert idx.nbytes < Int64Index(idx.values).nbytes / 10 + assert idx.nbytes < Int64Index(idx._values).nbytes / 10 # constant memory usage i2 = RangeIndex(0, 10) @@ -395,38 +396,38 @@ def test_slice_specialised(self, simple_index): # positive slice values index_slice = index[7:10:2] expected = Index(np.array([14, 18]), name="foo") - tm.assert_index_equal(index_slice, expected) + tm.assert_index_equal(index_slice, expected, exact="equiv") # negative slice values index_slice = index[-1:-5:-2] expected = Index(np.array([18, 14]), name="foo") - tm.assert_index_equal(index_slice, expected) + tm.assert_index_equal(index_slice, 
expected, exact="equiv") # stop overshoot index_slice = index[2:100:4] expected = Index(np.array([4, 12]), name="foo") - tm.assert_index_equal(index_slice, expected) + tm.assert_index_equal(index_slice, expected, exact="equiv") # reverse index_slice = index[::-1] expected = Index(index.values[::-1], name="foo") - tm.assert_index_equal(index_slice, expected) + tm.assert_index_equal(index_slice, expected, exact="equiv") index_slice = index[-8::-1] expected = Index(np.array([4, 2, 0]), name="foo") - tm.assert_index_equal(index_slice, expected) + tm.assert_index_equal(index_slice, expected, exact="equiv") index_slice = index[-40::-1] expected = Index(np.array([], dtype=np.int64), name="foo") - tm.assert_index_equal(index_slice, expected) + tm.assert_index_equal(index_slice, expected, exact="equiv") index_slice = index[40::-1] expected = Index(index.values[40::-1], name="foo") - tm.assert_index_equal(index_slice, expected) + tm.assert_index_equal(index_slice, expected, exact="equiv") index_slice = index[10::-1] expected = Index(index.values[::-1], name="foo") - tm.assert_index_equal(index_slice, expected) + tm.assert_index_equal(index_slice, expected, exact="equiv") @pytest.mark.parametrize("step", set(range(-5, 6)) - {0}) def test_len_specialised(self, step): diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py index 71fd5396b850b..e81271d8ee306 100644 --- a/pandas/tests/indexes/ranges/test_setops.py +++ b/pandas/tests/indexes/ranges/test_setops.py @@ -75,17 +75,17 @@ def test_intersection(self, sort): other = RangeIndex(1, 6) result = index.intersection(other, sort=sort) expected = Index(np.sort(np.intersect1d(index.values, other.values))) - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact="equiv") # intersect with decreasing RangeIndex other = RangeIndex(5, 0, -1) result = index.intersection(other, sort=sort) expected = Index(np.sort(np.intersect1d(index.values, other.values))) - 
tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact="equiv") # reversed (GH 17296) result = other.intersection(index, sort=sort) - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact="equiv") # GH 17296: intersect two decreasing RangeIndexes first = RangeIndex(10, -2, -2) @@ -288,7 +288,7 @@ def test_union_sorted(self, unions): res2 = idx2.union(idx1, sort=None) res3 = Int64Index(idx1._values, name=idx1.name).union(idx2, sort=None) tm.assert_index_equal(res2, expected_sorted, exact=True) - tm.assert_index_equal(res3, expected_sorted) + tm.assert_index_equal(res3, expected_sorted, exact="equiv") def test_difference(self): # GH#12034 Cases where we operate against another RangeIndex and may diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py index 510d76ebe4407..f68bde2188e67 100644 --- a/pandas/tests/indexes/test_any_index.py +++ b/pandas/tests/indexes/test_any_index.py @@ -60,7 +60,7 @@ def test_map_identity_mapping(index): expected = index.astype(np.int64) else: expected = index - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact="equiv") def test_wrong_number_names(index): diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 51ddcce618661..4b7a377570fd5 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -22,6 +22,7 @@ DataFrame, DatetimeIndex, IntervalIndex, + NumericIndex, PeriodIndex, RangeIndex, Series, @@ -680,8 +681,8 @@ def test_map_tseries_indices_return_index(self, attr): def test_map_tseries_indices_accsr_return_index(self): date_index = tm.makeDateIndex(24, freq="h", name="hourly") - expected = Index(range(24), name="hourly") - tm.assert_index_equal(expected, date_index.map(lambda x: x.hour)) + expected = Int64Index(range(24), name="hourly") + tm.assert_index_equal(expected, date_index.map(lambda x: x.hour), exact=True) 
@pytest.mark.parametrize( "mapper", @@ -1751,14 +1752,15 @@ def test_validate_1d_input(): [Float64Index, {}], [DatetimeIndex, {}], [TimedeltaIndex, {}], + [NumericIndex, {}], [PeriodIndex, {"freq": "Y"}], ], ) def test_construct_from_memoryview(klass, extra_kwargs): # GH 13120 result = klass(memoryview(np.arange(2000, 2005)), **extra_kwargs) - expected = klass(range(2000, 2005), **extra_kwargs) - tm.assert_index_equal(result, expected) + expected = klass(list(range(2000, 2005)), **extra_kwargs) + tm.assert_index_equal(result, expected, exact=True) def test_index_set_names_pos_args_deprecation(): diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py index 44d1d9710df45..a0e97223435e6 100644 --- a/pandas/tests/indexes/test_setops.py +++ b/pandas/tests/indexes/test_setops.py @@ -15,6 +15,7 @@ DatetimeIndex, Index, MultiIndex, + RangeIndex, Series, TimedeltaIndex, Timestamp, @@ -453,19 +454,20 @@ def test_intersection_difference_match_empty(self, index, sort): "method", ["intersection", "union", "difference", "symmetric_difference"] ) def test_setop_with_categorical(index, sort, method): - if isinstance(index, MultiIndex): + if isinstance(index, MultiIndex): # TODO: flat_index? 
# tested separately in tests.indexes.multi.test_setops return other = index.astype("category") + exact = "equiv" if isinstance(index, RangeIndex) else True result = getattr(index, method)(other, sort=sort) expected = getattr(index, method)(index, sort=sort) - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact=exact) result = getattr(index, method)(other[:5], sort=sort) expected = getattr(index, method)(index[:5], sort=sort) - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact=exact) def test_intersection_duplicates_all_indexes(index): diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py index 2077d2a210765..c4efcd140baae 100644 --- a/pandas/tests/window/test_groupby.py +++ b/pandas/tests/window/test_groupby.py @@ -680,6 +680,7 @@ def test_groupby_rolling_resulting_multiindex(self): ) tm.assert_index_equal(result.index, expected_index) + def test_groupby_rolling_resulting_multiindex2(self): # grouping by 2 columns -> 3-level MI as result df = DataFrame({"a": np.arange(12.0), "b": [1, 2] * 6, "c": [1, 2, 3, 4] * 3}) result = df.groupby(["b", "c"]).rolling(2).sum() @@ -702,6 +703,7 @@ def test_groupby_rolling_resulting_multiindex(self): ) tm.assert_index_equal(result.index, expected_index) + def test_groupby_rolling_resulting_multiindex3(self): # grouping with 1 level on dataframe with 2-level MI -> 3-level MI as result df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4, "c": [1, 2, 3, 4] * 2}) df = df.set_index("c", append=True) @@ -719,7 +721,7 @@ def test_groupby_rolling_resulting_multiindex(self): ], names=["b", None, "c"], ) - tm.assert_index_equal(result.index, expected_index) + tm.assert_index_equal(result.index, expected_index, exact="equiv") def test_groupby_rolling_object_doesnt_affect_groupby_apply(self): # GH 39732
After this we're close to being able to change the default from "equiv" to True (not that we would since that would be an API change)
https://api.github.com/repos/pandas-dev/pandas/pulls/43989
2021-10-12T01:50:11Z
2021-10-12T19:26:51Z
2021-10-12T19:26:50Z
2021-10-12T19:31:54Z
PERF: RangeIndex.insert
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index d219361dbdd57..90649ad2dcbc1 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -713,6 +713,20 @@ def symmetric_difference(self, other, result_name: Hashable = None, sort=None): # -------------------------------------------------------------------- + def insert(self, loc: int, item) -> Index: + if len(self) and (is_integer(item) or is_float(item)): + # We can retain RangeIndex is inserting at the beginning or end + rng = self._range + if loc == 0 and item == self[0] - self.step: + new_rng = range(rng.start - rng.step, rng.stop, rng.step) + return type(self)._simple_new(new_rng, name=self.name) + + elif loc == len(self) and item == self[-1] + self.step: + new_rng = range(rng.start, rng.stop + rng.step, rng.step) + return type(self)._simple_new(new_rng, name=self.name) + + return super().insert(loc, item) + def _concat(self, indexes: list[Index], name: Hashable) -> Index: """ Overriding parent method for the case of all RangeIndex instances. diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 38127a0e255bd..75866a3c8f931 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -818,7 +818,7 @@ def test_insert_non_na(self, simple_index): cls = Int64Index expected = cls([index[0]] + list(index), dtype=index.dtype) - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected, exact=True) def test_insert_na(self, nulls_fixture, simple_index): # GH 18295 (test missing)
``` index = pd.Index(range(2)) %timeit index.insert(2, 2) 48.4 µs ± 2.03 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) # <- master 2.54 µs ± 70.1 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) # <- PR ```
https://api.github.com/repos/pandas-dev/pandas/pulls/43988
2021-10-11T23:50:46Z
2021-10-12T19:32:17Z
2021-10-12T19:32:17Z
2021-10-12T19:45:16Z
REF: extract params used in DataFrame.__repr__
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 76a00071c8adc..9ce8a19b98857 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -994,24 +994,8 @@ def __repr__(self) -> str: self.info(buf=buf) return buf.getvalue() - max_rows = get_option("display.max_rows") - min_rows = get_option("display.min_rows") - max_cols = get_option("display.max_columns") - max_colwidth = get_option("display.max_colwidth") - show_dimensions = get_option("display.show_dimensions") - if get_option("display.expand_frame_repr"): - width, _ = console.get_console_size() - else: - width = None - self.to_string( - buf=buf, - max_rows=max_rows, - min_rows=min_rows, - max_cols=max_cols, - line_width=width, - max_colwidth=max_colwidth, - show_dimensions=show_dimensions, - ) + repr_params = fmt.get_dataframe_repr_params() + self.to_string(buf=buf, **repr_params) return buf.getvalue() diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index cfda2911db73f..07811be909330 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -483,6 +483,37 @@ def get_adjustment() -> TextAdjustment: return TextAdjustment() +def get_dataframe_repr_params() -> dict[str, Any]: + """Get the parameters used to repr(dataFrame) calls using DataFrame.to_string. + + Supplying these parameters to DataFrame.to_string is equivalent to calling + ``repr(DataFrame)``. This is useful if you want to adjust the repr output. 
+ + Example + ------- + >>> import pandas as pd + >>> + >>> df = pd.DataFrame([[1, 2], [3, 4]]) + >>> repr_params = pd.io.formats.format.get_dataframe_repr_params() + >>> repr(df) == df.to_string(**repr_params) + True + """ + from pandas.io.formats import console + + if get_option("display.expand_frame_repr"): + line_width, _ = console.get_console_size() + else: + line_width = None + return { + "max_rows": get_option("display.max_rows"), + "min_rows": get_option("display.min_rows"), + "max_cols": get_option("display.max_columns"), + "max_colwidth": get_option("display.max_colwidth"), + "show_dimensions": get_option("display.show_dimensions"), + "line_width": line_width, + } + + class DataFrameFormatter: """Class for processing dataframe formatting options and data."""
`DataFrame.__repr__` used different parameters than `DataFrame.to_string` and I've got a case where I want to use the `__repr__` ones, but with a change to one of the parameters. This refactoring makes it possible to extract those parameters so we can do: ```python >>> params = pd.io.formats.format.get_frame_repr_params() >>> params[my_param] = new_value >>> df.to_string(**params) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/43987
2021-10-11T22:58:31Z
2021-10-14T23:33:23Z
2021-10-14T23:33:23Z
2021-10-16T13:12:03Z
BUG: Fix `skipna` default value in method signatures
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b235f120d98c8..0e86a544f3435 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10387,15 +10387,13 @@ def _stat_function_ddof( name: str, func, axis=None, - skipna=None, + skipna=True, level=None, ddof=1, numeric_only=None, **kwargs, ): nv.validate_stat_ddof_func((), kwargs, fname=name) - if skipna is None: - skipna = True if axis is None: axis = self._stat_axis_number if level is not None: @@ -10414,21 +10412,21 @@ def _stat_function_ddof( ) def sem( - self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs + self, axis=None, skipna=True, level=None, ddof=1, numeric_only=None, **kwargs ): return self._stat_function_ddof( "sem", nanops.nansem, axis, skipna, level, ddof, numeric_only, **kwargs ) def var( - self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs + self, axis=None, skipna=True, level=None, ddof=1, numeric_only=None, **kwargs ): return self._stat_function_ddof( "var", nanops.nanvar, axis, skipna, level, ddof, numeric_only, **kwargs ) def std( - self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs + self, axis=None, skipna=True, level=None, ddof=1, numeric_only=None, **kwargs ): return self._stat_function_ddof( "std", nanops.nanstd, axis, skipna, level, ddof, numeric_only, **kwargs @@ -10440,7 +10438,7 @@ def _stat_function( name: str, func, axis=None, - skipna=None, + skipna=True, level=None, numeric_only=None, **kwargs, @@ -10449,8 +10447,6 @@ def _stat_function( nv.validate_median((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) - if skipna is None: - skipna = True if axis is None: axis = self._stat_axis_number if level is not None: @@ -10468,32 +10464,32 @@ def _stat_function( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) - def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + def min(self, axis=None, skipna=True, level=None, 
numeric_only=None, **kwargs): return self._stat_function( "min", nanops.nanmin, axis, skipna, level, numeric_only, **kwargs ) - def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + def max(self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs): return self._stat_function( "max", nanops.nanmax, axis, skipna, level, numeric_only, **kwargs ) - def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + def mean(self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs): return self._stat_function( "mean", nanops.nanmean, axis, skipna, level, numeric_only, **kwargs ) - def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + def median(self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs): return self._stat_function( "median", nanops.nanmedian, axis, skipna, level, numeric_only, **kwargs ) - def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + def skew(self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs): return self._stat_function( "skew", nanops.nanskew, axis, skipna, level, numeric_only, **kwargs ) - def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + def kurt(self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs): return self._stat_function( "kurt", nanops.nankurt, axis, skipna, level, numeric_only, **kwargs ) @@ -10506,7 +10502,7 @@ def _min_count_stat_function( name: str, func, axis=None, - skipna=None, + skipna=True, level=None, numeric_only=None, min_count=0, @@ -10518,8 +10514,6 @@ def _min_count_stat_function( nv.validate_prod((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) - if skipna is None: - skipna = True if axis is None: axis = self._stat_axis_number if level is not None: @@ -10550,7 +10544,7 @@ def _min_count_stat_function( def sum( self, axis=None, - skipna=None, + skipna=True, level=None, numeric_only=None, 
min_count=0, @@ -10563,7 +10557,7 @@ def sum( def prod( self, axis=None, - skipna=None, + skipna=True, level=None, numeric_only=None, min_count=0, @@ -10690,7 +10684,7 @@ def mad(self, axis=None, skipna=None, level=None): def sem( self, axis=None, - skipna=None, + skipna=True, level=None, ddof=1, numeric_only=None, @@ -10712,7 +10706,7 @@ def sem( def var( self, axis=None, - skipna=None, + skipna=True, level=None, ddof=1, numeric_only=None, @@ -10735,7 +10729,7 @@ def var( def std( self, axis=None, - skipna=None, + skipna=True, level=None, ddof=1, numeric_only=None, @@ -10815,7 +10809,7 @@ def cumprod(self, axis=None, skipna=True, *args, **kwargs): def sum( self, axis=None, - skipna=None, + skipna=True, level=None, numeric_only=None, min_count=0, @@ -10840,7 +10834,7 @@ def sum( def prod( self, axis=None, - skipna=None, + skipna=True, level=None, numeric_only=None, min_count=0, @@ -10863,7 +10857,7 @@ def prod( see_also="", examples="", ) - def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + def mean(self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs): return NDFrame.mean(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "mean", mean) @@ -10878,7 +10872,7 @@ def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): see_also="", examples="", ) - def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + def skew(self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs): return NDFrame.skew(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "skew", skew) @@ -10896,7 +10890,7 @@ def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): see_also="", examples="", ) - def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + def kurt(self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs): return NDFrame.kurt(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, 
"kurt", kurt) @@ -10913,7 +10907,7 @@ def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): examples="", ) def median( - self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs + self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs ): return NDFrame.median(self, axis, skipna, level, numeric_only, **kwargs) @@ -10931,7 +10925,7 @@ def median( see_also=_stat_func_see_also, examples=_max_examples, ) - def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + def max(self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs): return NDFrame.max(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "max", max) @@ -10948,7 +10942,7 @@ def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): see_also=_stat_func_see_also, examples=_min_examples, ) - def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + def min(self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs): return NDFrame.min(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "min", min)
Set `skipna` default values to `True` so that the value matches the purpose of the variable and its docstring. - [X] closes #34063 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/43980
2021-10-11T21:09:30Z
2021-10-16T17:28:54Z
2021-10-16T17:28:54Z
2021-10-16T17:30:01Z
TYP: misc
diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi index bff7d117c97da..6dd1c7c9fb209 100644 --- a/pandas/_libs/algos.pyi +++ b/pandas/_libs/algos.pyi @@ -1,4 +1,5 @@ -# Note: this covers algos.pyx and algos_common_helper but NOT algos_take_helper +from __future__ import annotations + from typing import Any import numpy as np @@ -82,7 +83,7 @@ def pad( old: np.ndarray, # ndarray[algos_t] new: np.ndarray, # ndarray[algos_t] limit=..., -) -> np.ndarray: ... # np.ndarray[np.intp, ndim=1] +) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1] def pad_inplace( values: np.ndarray, # algos_t[:] mask: np.ndarray, # uint8_t[:] @@ -97,7 +98,7 @@ def backfill( old: np.ndarray, # ndarray[algos_t] new: np.ndarray, # ndarray[algos_t] limit=..., -) -> np.ndarray: ... # np.ndarray[np.intp, ndim=1] +) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1] def backfill_inplace( values: np.ndarray, # algos_t[:] mask: np.ndarray, # uint8_t[:] @@ -163,230 +164,287 @@ def ensure_uint16(arr: object, copy=...) -> npt.NDArray[np.uint16]: ... def ensure_uint32(arr: object, copy=...) -> npt.NDArray[np.uint32]: ... def ensure_uint64(arr: object, copy=...) -> npt.NDArray[np.uint64]: ... def take_1d_int8_int8( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_int8_int32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_int8_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_int8_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... 
def take_1d_int16_int16( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_int16_int32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_int16_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_int16_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_int32_int32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_int32_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_int32_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_int64_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_int64_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_float32_float32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... 
) -> None: ... def take_1d_float32_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_float64_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_object_object( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_bool_bool( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_1d_bool_object( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int8_int8( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int8_int32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int8_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int8_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int16_int16( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... 
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int16_int32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int16_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int16_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int32_int32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int32_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int32_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int64_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_int64_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_float32_float32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... 
def take_2d_axis0_float32_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_float64_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_object_object( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_bool_bool( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis0_bool_object( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int8_int8( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int8_int32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int8_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int8_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int16_int16( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... 
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int16_int32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int16_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int16_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int32_int32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int32_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int32_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int64_int64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_int64_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_float32_float32( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... 
def take_2d_axis1_float32_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_float64_float64( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_object_object( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_bool_bool( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_axis1_bool_object( - values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... def take_2d_multi_int8_int8( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_int8_int32( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_int8_int64( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_int8_float64( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... 
def take_2d_multi_int16_int16( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_int16_int32( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_int16_int64( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_int16_float64( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_int32_int32( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_int32_int64( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_int32_float64( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_int64_float64( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_float32_float32( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... 
+ values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_float32_float64( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_float64_float64( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_object_object( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_bool_bool( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_bool_object( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... def take_2d_multi_int64_int64( - values: np.ndarray, indexer, out: np.ndarray, fill_value=... + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., ) -> None: ... 
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index 2dd2f1feadd70..75eee4d432637 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -120,13 +120,6 @@ cdef inline uint64_t _rotl(uint64_t x, uint64_t b) nogil: return (x << b) | (x >> (64 - b)) -cdef inline void u32to8_le(uint8_t* p, uint32_t v) nogil: - p[0] = <uint8_t>(v) - p[1] = <uint8_t>(v >> 8) - p[2] = <uint8_t>(v >> 16) - p[3] = <uint8_t>(v >> 24) - - cdef inline uint64_t u8to64_le(uint8_t* p) nogil: return (<uint64_t>p[0] | <uint64_t>p[1] << 8 | diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 6d51ea7d5de7b..77d3f954a9a5d 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -1046,7 +1046,7 @@ cdef class StringHashTable(HashTable): not None, then _additionally_ any value "val" satisfying val == na_value is considered missing. mask : ndarray[bool], optional - Not yet implementd for StringHashTable. + Not yet implemented for StringHashTable. Returns ------- diff --git a/pandas/_libs/index.pyi b/pandas/_libs/index.pyi index 18ee216d5c4a1..446a980487cde 100644 --- a/pandas/_libs/index.pyi +++ b/pandas/_libs/index.pyi @@ -62,4 +62,4 @@ class BaseMultiIndexCodesEngine: values: np.ndarray, # np.ndarray[object] of tuples method: str, limit: int | None, - ) -> np.ndarray: ... # np.ndarray[np.int64] + ) -> npt.NDArray[np.intp]: ... 
diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index 6d350cfa2c1d6..7da1b571fcd27 100644 --- a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -13,7 +13,10 @@ algos as libalgos, lib, ) -from pandas._typing import ArrayLike +from pandas._typing import ( + ArrayLike, + npt, +) from pandas.core.dtypes.cast import maybe_promote from pandas.core.dtypes.common import ( @@ -110,7 +113,7 @@ def take_nd( def _take_nd_ndarray( arr: np.ndarray, - indexer, + indexer: npt.NDArray[np.intp] | None, axis: int, fill_value, allow_fill: bool, @@ -122,7 +125,7 @@ def _take_nd_ndarray( else: indexer = ensure_platform_int(indexer) - indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( + dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( arr, indexer, fill_value, allow_fill ) @@ -160,7 +163,7 @@ def _take_nd_ndarray( def take_1d( arr: ArrayLike, - indexer: np.ndarray, + indexer: npt.NDArray[np.intp], fill_value=None, allow_fill: bool = True, ) -> ArrayLike: @@ -168,7 +171,7 @@ def take_1d( Specialized version for 1D arrays. Differences compared to `take_nd`: - Assumes input array has already been converted to numpy array / EA - - Assumes indexer is already guaranteed to be int64 dtype ndarray + - Assumes indexer is already guaranteed to be intp dtype ndarray - Only works for 1D arrays To ensure the lowest possible overhead. @@ -176,6 +179,8 @@ def take_1d( Note: similarly to `take_nd`, this function assumes that the indexer is a valid(ated) indexer with no out of bound indices. 
""" + indexer = ensure_platform_int(indexer) + if not isinstance(arr, np.ndarray): # ExtensionArray -> dispatch to their method return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) @@ -183,7 +188,7 @@ def take_1d( if not allow_fill: return arr.take(indexer) - indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( + dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( arr, indexer, fill_value, True ) @@ -200,7 +205,9 @@ def take_1d( def take_2d_multi( - arr: np.ndarray, indexer: tuple[np.ndarray, np.ndarray], fill_value=np.nan + arr: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + fill_value=np.nan, ) -> np.ndarray: """ Specialized Cython take which sets NaN values in one pass. @@ -249,6 +256,7 @@ def take_2d_multi( if func is not None: func(arr, indexer, out=out, fill_value=fill_value) else: + # test_reindex_multi _take_2d_multi_object( arr, indexer, out, fill_value=fill_value, mask_info=mask_info ) @@ -457,7 +465,7 @@ def wrapper( def _take_nd_object( arr: np.ndarray, - indexer: np.ndarray, # np.ndarray[np.intp] + indexer: npt.NDArray[np.intp], out: np.ndarray, axis: int, fill_value, @@ -480,7 +488,7 @@ def _take_nd_object( def _take_2d_multi_object( arr: np.ndarray, - indexer: tuple[np.ndarray, np.ndarray], + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], out: np.ndarray, fill_value, mask_info, @@ -509,7 +517,7 @@ def _take_2d_multi_object( def _take_preprocess_indexer_and_fill_value( arr: np.ndarray, - indexer: np.ndarray, + indexer: npt.NDArray[np.intp], fill_value, allow_fill: bool, ): @@ -533,5 +541,4 @@ def _take_preprocess_indexer_and_fill_value( # to crash when trying to cast it to dtype) dtype, fill_value = arr.dtype, arr.dtype.type() - indexer = ensure_platform_int(indexer) - return indexer, dtype, fill_value, mask_info + return dtype, fill_value, mask_info diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 76a00071c8adc..0aea55613a78a 
100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4672,17 +4672,23 @@ def _reindex_columns( allow_dups=False, ) - def _reindex_multi(self, axes, copy: bool, fill_value) -> DataFrame: + def _reindex_multi( + self, axes: dict[str, Index], copy: bool, fill_value + ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ + new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: + # Fastpath. By doing two 'take's at once we avoid making an + # unnecessary copy. + # We only get here with `not self._is_mixed_type`, which (almost) + # ensures that self.values is cheap. It may be worth making this + # condition more specific. indexer = row_indexer, col_indexer - # error: Argument 2 to "take_2d_multi" has incompatible type "Tuple[Any, - # Any]"; expected "ndarray" new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns) else: diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index 47ae88a52d919..034ca091c6fbf 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -41,7 +41,7 @@ _T = TypeVar("_T", bound="NDArrayBackedExtensionIndex") -def inherit_from_data(name: str, delegate, cache: bool = False, wrap: bool = False): +def _inherit_from_data(name: str, delegate, cache: bool = False, wrap: bool = False): """ Make an alias for a method of the underlying ExtensionArray. 
@@ -130,7 +130,7 @@ def inherit_names(names: list[str], delegate, cache: bool = False, wrap: bool = def wrapper(cls): for name in names: - meth = inherit_from_data(name, delegate, cache=cache, wrap=wrap) + meth = _inherit_from_data(name, delegate, cache=cache, wrap=wrap) setattr(cls, name, meth) return cls diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py index 18f96f9c61ab8..48f984c21623b 100644 --- a/pandas/tests/apply/test_series_apply.py +++ b/pandas/tests/apply/test_series_apply.py @@ -774,7 +774,7 @@ def test_apply_series_on_date_time_index_aware_series(dti, exp, aware): tm.assert_frame_equal(result, exp) -def test_apply_scaler_on_date_time_index_aware_series(): +def test_apply_scalar_on_date_time_index_aware_series(): # GH 25959 # Calling apply on a localized time series should not cause an error series = tm.makeTimeSeries(nper=30).tz_localize("UTC")
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/43977
2021-10-11T19:20:20Z
2021-10-14T23:21:06Z
2021-10-14T23:21:06Z
2021-10-14T23:45:48Z
DOC: bug in user guide after new method
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb index 67cf07a718877..1c83b0d3d048b 100644 --- a/doc/source/user_guide/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -1288,7 +1288,7 @@ "outputs": [], "source": [ "df2.style.format('{:.3f}', na_rep=\"\")\\\n", - " .bar(align=0, vmin=-2.5, vmax=2.5, color=mpl.cm.get_cmap(\"bwr\"), height=50,\n", + " .bar(align=0, vmin=-2.5, vmax=2.5, cmap=\"bwr\", height=50,\n", " width=60, props=\"width: 120px; border-right: 1px solid black;\")\\\n", " .text_gradient(cmap=\"bwr\", vmin=-2.5, vmax=2.5)" ]
just updating something that I missed on previous pr when arg name was changed.
https://api.github.com/repos/pandas-dev/pandas/pulls/43976
2021-10-11T16:11:30Z
2021-10-14T17:07:13Z
2021-10-14T17:07:13Z
2021-10-15T16:48:20Z
STYLE ban np.testing
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e99fda786ee68..2c76b682ee343 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -135,6 +135,12 @@ repos: entry: 'np\.random\.seed' files: ^asv_bench/benchmarks exclude: ^asv_bench/benchmarks/pandas_vb_common\.py + - id: np-testing-array-equal + name: Check for usage of numpy testing or array_equal + language: pygrep + entry: '(numpy|np)(\.testing|\.array_equal)' + files: ^pandas/tests/ + types: [python] - id: invalid-ea-testing name: Check for invalid EA testing language: pygrep diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 0b00ff2dbd861..3a3103b095e45 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -1256,7 +1256,7 @@ def test_to_coo( row_levels=(0, 1), column_levels=(2, 3), sort_labels=sort_labels ) assert isinstance(A, scipy.sparse.coo.coo_matrix) - np.testing.assert_array_equal(A.toarray(), expected_A) + tm.assert_numpy_array_equal(A.toarray(), expected_A) assert rows == expected_rows assert cols == expected_cols
xref https://github.com/pandas-dev/pandas/pull/43674#discussion_r720831806 There used to be a linting rule for this, but I missed it when moving linting rules over to `pandas-dev-flaker` in https://github.com/pandas-dev/pandas/pull/40906 - sorry about that, I thought I'd checked that everything had been preserved Would be good to use this one from `pandas-dev-flaker` directly, but without a `--per-file-selects` option it'd be rather cumbersome (https://github.com/PyCQA/flake8/issues/344#issuecomment-940054283)
https://api.github.com/repos/pandas-dev/pandas/pulls/43971
2021-10-11T13:58:02Z
2021-10-11T22:29:40Z
2021-10-11T22:29:40Z
2021-10-11T22:29:44Z
FIX BUG: Timestamp __add__/__sub__ DateOffset with nanoseconds lost.
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index f50442eb7ca46..d6d3127e67945 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -626,6 +626,7 @@ Datetimelike - Bug in adding a ``np.timedelta64`` object to a :class:`BusinessDay` or :class:`CustomBusinessDay` object incorrectly raising (:issue:`44532`) - Bug in :meth:`Index.insert` for inserting ``np.datetime64``, ``np.timedelta64`` or ``tuple`` into :class:`Index` with ``dtype='object'`` with negative loc adding ``None`` and replacing existing value (:issue:`44509`) - Bug in :meth:`Series.mode` with ``DatetimeTZDtype`` incorrectly returning timezone-naive and ``PeriodDtype`` incorrectly raising (:issue:`41927`) +- Bug in :class:`DateOffset`` addition with :class:`Timestamp` where ``offset.nanoseconds`` would not be included in the result. (:issue:`43968`) - Timedelta diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 7e6d8fa38aa45..6df4abc160b0b 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -186,8 +186,9 @@ def apply_wraps(func): if self.normalize: result = result.normalize() - # nanosecond may be deleted depending on offset process - if not self.normalize and nano != 0: + # If the offset object does not have a nanoseconds component, + # the result's nanosecond component may be lost. 
+ if not self.normalize and nano != 0 and not hasattr(self, "nanoseconds"): if result.nanosecond != nano: if result.tz is not None: # convert to UTC @@ -333,7 +334,7 @@ cdef _determine_offset(kwds): # sub-daily offset - use timedelta (tz-aware) offset = timedelta(**kwds_no_nanos) else: - offset = timedelta(1) + offset = timedelta(0) return offset, use_relativedelta @@ -1068,12 +1069,17 @@ cdef class RelativeDeltaOffset(BaseOffset): # perform calculation in UTC other = other.replace(tzinfo=None) + if hasattr(self, "nanoseconds"): + td_nano = Timedelta(nanoseconds=self.nanoseconds) + else: + td_nano = Timedelta(0) + if self.n > 0: for i in range(self.n): - other = other + self._offset + other = other + self._offset + td_nano else: for i in range(-self.n): - other = other - self._offset + other = other - self._offset - td_nano if tzinfo is not None and self._use_relativedelta: # bring tz back from UTC calculation diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index f491b5aeedadc..1c26793876e5a 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -307,7 +307,6 @@ cdef class _Timestamp(ABCTimestamp): elif not isinstance(self, _Timestamp): # cython semantics, args have been switched and this is __radd__ return other.__add__(self) - return NotImplemented def __sub__(self, other): diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 6cec35939dff2..5dcfd0019e93f 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -668,14 +668,6 @@ def test_rule_code(self): assert alias == (_get_offset(alias) * 5).rule_code -def test_dateoffset_misc(): - oset = offsets.DateOffset(months=2, days=4) - # it works - oset.freqstr - - assert not offsets.DateOffset(months=2) == 2 - - def test_freq_offsets(): off = BDay(1, offset=timedelta(0, 1800)) assert off.freqstr == "B+30Min" @@ -791,6 +783,54 @@ def 
test_tick_normalize_raises(tick_classes): cls(n=3, normalize=True) +@pytest.mark.parametrize( + "offset_kwargs, expected_arg", + [ + ({"nanoseconds": 1}, "1970-01-01 00:00:00.000000001"), + ({"nanoseconds": 5}, "1970-01-01 00:00:00.000000005"), + ({"nanoseconds": -1}, "1969-12-31 23:59:59.999999999"), + ({"microseconds": 1}, "1970-01-01 00:00:00.000001"), + ({"microseconds": -1}, "1969-12-31 23:59:59.999999"), + ({"seconds": 1}, "1970-01-01 00:00:01"), + ({"seconds": -1}, "1969-12-31 23:59:59"), + ({"minutes": 1}, "1970-01-01 00:01:00"), + ({"minutes": -1}, "1969-12-31 23:59:00"), + ({"hours": 1}, "1970-01-01 01:00:00"), + ({"hours": -1}, "1969-12-31 23:00:00"), + ({"days": 1}, "1970-01-02 00:00:00"), + ({"days": -1}, "1969-12-31 00:00:00"), + ({"weeks": 1}, "1970-01-08 00:00:00"), + ({"weeks": -1}, "1969-12-25 00:00:00"), + ({"months": 1}, "1970-02-01 00:00:00"), + ({"months": -1}, "1969-12-01 00:00:00"), + ({"years": 1}, "1971-01-01 00:00:00"), + ({"years": -1}, "1969-01-01 00:00:00"), + ], +) +def test_dateoffset_add_sub(offset_kwargs, expected_arg): + offset = DateOffset(**offset_kwargs) + ts = Timestamp(0) + result = ts + offset + expected = Timestamp(expected_arg) + assert result == expected + result -= offset + assert result == ts + result = offset + ts + assert result == expected + + +def test_dataoffset_add_sub_timestamp_with_nano(): + offset = DateOffset(minutes=2, nanoseconds=9) + ts = Timestamp(4) + result = ts + offset + expected = Timestamp("1970-01-01 00:02:00.000000013") + assert result == expected + result -= offset + assert result == ts + result = offset + ts + assert result == expected + + @pytest.mark.parametrize( "attribute", [ @@ -806,3 +846,11 @@ def test_dateoffset_immutable(attribute): msg = "DateOffset objects are immutable" with pytest.raises(AttributeError, match=msg): setattr(offset, attribute, 5) + + +def test_dateoffset_misc(): + oset = offsets.DateOffset(months=2, days=4) + # it works + oset.freqstr + + assert not 
offsets.DateOffset(months=2) == 2
- [x] closes #43892 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/43968
2021-10-11T13:23:29Z
2021-12-17T22:41:30Z
2021-12-17T22:41:29Z
2021-12-18T07:47:52Z
Example for pandas.DataFrame.to_period docs
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 76a00071c8adc..5df90eda378ee 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -10503,6 +10503,28 @@ def to_period( Returns ------- DataFrame with PeriodIndex + + Examples + -------- + >>> idx = pd.to_datetime( + ... [ + ... "2001-03-31 00:00:00", + ... "2002-05-31 00:00:00", + ... "2003-08-31 00:00:00", + ... ] + ... ) + + >>> idx + DatetimeIndex(['2001-03-31', '2002-05-31', '2003-08-31'], + dtype='datetime64[ns]', freq=None) + + >>> idx.to_period("M") + PeriodIndex(['2001-03', '2002-05', '2003-08'], dtype='period[M]') + + For the yearly frequency + + >>> idx.to_period("Y") + PeriodIndex(['2001', '2002', '2003'], dtype='period[A-DEC]') """ new_obj = self.copy(deep=copy)
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry Doc improvement - added the example section for pandas.DataFrame.to_period
https://api.github.com/repos/pandas-dev/pandas/pulls/43967
2021-10-11T10:49:49Z
2021-10-14T22:40:42Z
2021-10-14T22:40:42Z
2021-10-14T22:41:02Z
DOC: Fix Series nlargest and nsmallest doc strings (#43964)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 76a00071c8adc..b266702f9a6d3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6597,7 +6597,7 @@ def nlargest(self, n, columns, keep: str = "first") -> DataFrame: - `first` : prioritize the first occurrence(s) - `last` : prioritize the last occurrence(s) - ``all`` : do not drop any duplicates, even it means - selecting more than `n` items. + selecting more than `n` items. Returns ------- diff --git a/pandas/core/series.py b/pandas/core/series.py index 6f48da82169b2..6aefc2c7a5788 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3692,11 +3692,11 @@ def nlargest(self, n=5, keep="first") -> Series: Series of `n` elements: - ``first`` : return the first `n` occurrences in order - of appearance. + of appearance. - ``last`` : return the last `n` occurrences in reverse - order of appearance. + order of appearance. - ``all`` : keep all occurrences. This can result in a Series of - size larger than `n`. + size larger than `n`. Returns ------- @@ -3790,11 +3790,11 @@ def nsmallest(self, n: int = 5, keep: str = "first") -> Series: Series of `n` elements: - ``first`` : return the first `n` occurrences in order - of appearance. + of appearance. - ``last`` : return the last `n` occurrences in reverse - order of appearance. + order of appearance. - ``all`` : keep all occurrences. This can result in a Series of - size larger than `n`. + size larger than `n`. Returns ------- diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 7d89f2e8b7789..9c9e70789390d 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -492,7 +492,7 @@ ------ AssertionError * If `regex` is not a ``bool`` and `to_replace` is not - ``None``. + ``None``. TypeError * If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
- [x] closes #43937 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] Cancel nlargest and nsmallest's doc list item text bold effect, detail could see below. From ![Clip_20211011_094600](https://user-images.githubusercontent.com/25895405/136727253-30d20e21-ba1c-4be0-b1ad-87239b66d59e.png) To ![Clip_20211011_105541](https://user-images.githubusercontent.com/25895405/136727262-f28a75e0-48e8-478b-a460-7690181d97d0.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/43964
2021-10-11T02:59:44Z
2021-10-12T22:28:00Z
2021-10-12T22:28:00Z
2021-10-15T02:02:53Z
Test that using a `tuple` or `list` for parameter `ascending` of `sort_index` is the same
diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index e0f3286ec1f2f..c1141f705acbc 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -785,6 +785,48 @@ def test_sort_index_use_inf_as_na(self): result = expected.sort_index() tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize( + "ascending", + [(True, False), [True, False]], + ) + def test_sort_index_ascending_tuple(self, ascending): + df = DataFrame( + { + "legs": [4, 2, 4, 2, 2], + }, + index=MultiIndex.from_tuples( + [ + ("mammal", "dog"), + ("bird", "duck"), + ("mammal", "horse"), + ("bird", "penguin"), + ("mammal", "kangaroo"), + ], + names=["class", "animal"], + ), + ) + + # parameter `ascending`` is a tuple + result = df.sort_index(level=(0, 1), ascending=ascending) + + expected = DataFrame( + { + "legs": [2, 2, 2, 4, 4], + }, + index=MultiIndex.from_tuples( + [ + ("bird", "penguin"), + ("bird", "duck"), + ("mammal", "kangaroo"), + ("mammal", "horse"), + ("mammal", "dog"), + ], + names=["class", "animal"], + ), + ) + + tm.assert_frame_equal(result, expected) + class TestDataFrameSortIndexKey: def test_sort_multi_index_key(self):
- [x] closes #43884 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry I could not find a suitable place to add a `whatsnew` entry, if there is one I will push another commit.
https://api.github.com/repos/pandas-dev/pandas/pulls/43963
2021-10-10T22:17:27Z
2021-10-16T14:35:19Z
2021-10-16T14:35:19Z
2021-10-16T14:35:23Z
BUG: RangeIndex arithmetic result.name
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 4914d213accac..32f99c7852cca 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -411,6 +411,7 @@ Numeric - Bug in :meth:`DataFrame.rank` treating missing values and extreme values as equal (for example ``np.nan`` and ``np.inf``), causing incorrect results when ``na_option="bottom"`` or ``na_option="top`` used (:issue:`41931`) - Bug in ``numexpr`` engine still being used when the option ``compute.use_numexpr`` is set to ``False`` (:issue:`32556`) - Bug in :class:`DataFrame` arithmetic ops with a subclass whose :meth:`_constructor` attribute is a callable other than the subclass itself (:issue:`43201`) +- Bug in arithmetic operations involving :class:`RangeIndex` where the result would have the incorrect ``name`` (:issue:`43962`) - Conversion diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 4003165a7ddc6..d219361dbdd57 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -885,9 +885,8 @@ def _arith_method(self, other, op): step = op # TODO: if other is a RangeIndex we may have more efficient options - other = extract_array(other, extract_numpy=True, extract_range=True) - - left, right = self, other + right = extract_array(other, extract_numpy=True, extract_range=True) + left = self try: # apply if we have an override @@ -907,7 +906,8 @@ def _arith_method(self, other, op): rstart = op(left.start, right) rstop = op(left.stop, right) - result = type(self)(rstart, rstop, rstep, name=self.name) + res_name = ops.get_op_result_name(self, other) + result = type(self)(rstart, rstop, rstep, name=res_name) # for compat with numpy / Int64Index # even if we can represent as a RangeIndex, return diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index f06e5e9bdc93b..ec17b9f0bfd52 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ 
b/pandas/tests/arithmetic/test_numeric.py @@ -693,11 +693,10 @@ def test_mul_float_series(self, numeric_idx): tm.assert_series_equal(result, expected) def test_mul_index(self, numeric_idx): - # in general not true for RangeIndex idx = numeric_idx - if not isinstance(idx, RangeIndex): - result = idx * idx - tm.assert_index_equal(result, idx ** 2) + + result = idx * idx + tm.assert_index_equal(result, idx ** 2) def test_mul_datelike_raises(self, numeric_idx): idx = numeric_idx @@ -1090,11 +1089,11 @@ def test_ufunc_compat(self, holder): box = Series if holder is Series else Index if holder is RangeIndex: - idx = RangeIndex(0, 5) + idx = RangeIndex(0, 5, name="foo") else: - idx = holder(np.arange(5, dtype="int64")) + idx = holder(np.arange(5, dtype="int64"), name="foo") result = np.sin(idx) - expected = box(np.sin(np.arange(5, dtype="int64"))) + expected = box(np.sin(np.arange(5, dtype="int64")), name="foo") tm.assert_equal(result, expected) @pytest.mark.parametrize("holder", [Int64Index, UInt64Index, Float64Index, Series]) @@ -1212,6 +1211,8 @@ class TestNumericArithmeticUnsorted: def check_binop(self, ops, scalars, idxs): for op in ops: for a, b in combinations(idxs, 2): + a = a._rename("foo") + b = b._rename("bar") result = op(a, b) expected = op(Int64Index(a), Int64Index(b)) tm.assert_index_equal(result, expected)
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/43962
2021-10-10T21:48:33Z
2021-10-11T22:06:08Z
2021-10-11T22:06:08Z
2021-10-11T22:21:06Z
ENH: Update isin docs with examples of ~ operator usage (#43959)
diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index 584dd0f52ae28..e41f938170417 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -997,6 +997,15 @@ a list of items you want to check for. df.isin(values) +To return the DataFrame of booleans where the values are *not* in the original DataFrame, +use the ``~`` operator: + +.. ipython:: python + + values = {'ids': ['a', 'b'], 'vals': [1, 3]} + + ~df.isin(values) + Combine DataFrame's ``isin`` with the ``any()`` and ``all()`` methods to quickly select subsets of your data that meet a given criteria. To select a row where each column meets its own criterion: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 76a00071c8adc..6abe82ead45c5 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -10559,6 +10559,13 @@ def isin(self, values) -> DataFrame: falcon True True dog False True + To check if ``values`` is *not* in the DataFrame, use the ``~`` operator: + + >>> ~df.isin([0, 2]) + num_legs num_wings + falcon False False + dog True False + When ``values`` is a dict, we can pass values to check for each column separately: diff --git a/pandas/core/series.py b/pandas/core/series.py index 6f48da82169b2..c9263f6ec1d20 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5012,6 +5012,17 @@ def isin(self, values) -> Series: 5 False Name: animal, dtype: bool + To invert the boolean values, use the ``~`` operator: + + >>> ~s.isin(['cow', 'lama']) + 0 False + 1 False + 2 False + 3 True + 4 False + 5 True + Name: animal, dtype: bool + Passing a single string as ``s.isin('lama')`` will raise an error. Use a list of one element instead:
- [x] closes #43959 Updated relevant docs to show usage of ~ to emulate `notin` operation.
https://api.github.com/repos/pandas-dev/pandas/pulls/43961
2021-10-10T21:42:10Z
2021-10-12T22:20:29Z
2021-10-12T22:20:29Z
2021-10-12T22:20:33Z
TYP/CLN: mostly in io/html.py
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d360c450194f1..c29a67c4942db 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -22,7 +22,6 @@ IO, TYPE_CHECKING, Any, - AnyStr, Callable, Hashable, Iterable, @@ -2598,7 +2597,7 @@ def to_stata( writer.write_file() @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") - def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None: + def to_feather(self, path: FilePathOrBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index b1ff188a7906b..145cbe182eadb 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -1,6 +1,10 @@ """ feather-format compat """ +from __future__ import annotations -from typing import AnyStr +from typing import ( + Hashable, + Sequence, +) from pandas._typing import ( FilePathOrBuffer, @@ -22,7 +26,7 @@ @doc(storage_options=generic._shared_docs["storage_options"]) def to_feather( df: DataFrame, - path: FilePathOrBuffer[AnyStr], + path: FilePathOrBuffer[bytes], storage_options: StorageOptions = None, **kwargs, ): @@ -89,7 +93,10 @@ def to_feather( @doc(storage_options=generic._shared_docs["storage_options"]) def read_feather( - path, columns=None, use_threads: bool = True, storage_options: StorageOptions = None + path: FilePathOrBuffer[bytes], + columns: Sequence[Hashable] | None = None, + use_threads: bool = True, + storage_options: StorageOptions = None, ): """ Load a feather-format object from the file path. 
diff --git a/pandas/io/html.py b/pandas/io/html.py index cbf10798a538a..cffe910f1c8ff 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -8,7 +8,6 @@ from collections import abc import numbers -import os import re from typing import ( Pattern, @@ -29,6 +28,8 @@ from pandas.core.frame import DataFrame from pandas.io.common import ( + file_exists, + get_handle, is_url, stringify_path, urlopen, @@ -70,7 +71,7 @@ def _importers(): _RE_WHITESPACE = re.compile(r"[\r\n]+|\s{2,}") -def _remove_whitespace(s: str, regex=_RE_WHITESPACE) -> str: +def _remove_whitespace(s: str, regex: Pattern = _RE_WHITESPACE) -> str: """ Replace extra whitespace inside of a string with a single space. @@ -89,7 +90,7 @@ def _remove_whitespace(s: str, regex=_RE_WHITESPACE) -> str: return regex.sub(" ", s.strip()) -def _get_skiprows(skiprows): +def _get_skiprows(skiprows: int | Sequence[int] | slice | None): """ Get an iterator given an integer, slice or container. @@ -118,7 +119,7 @@ def _get_skiprows(skiprows): raise TypeError(f"{type(skiprows).__name__} is not a valid type for skipping rows") -def _read(obj): +def _read(obj: bytes | FilePathOrBuffer, encoding: str | None) -> str | bytes: """ Try to read from a url, file or string. 
@@ -130,22 +131,26 @@ def _read(obj): ------- raw_text : str """ - if is_url(obj): - with urlopen(obj) as url: - text = url.read() - elif hasattr(obj, "read"): - text = obj.read() + if ( + is_url(obj) + or hasattr(obj, "read") + or (isinstance(obj, str) and file_exists(obj)) + ): + # error: Argument 1 to "get_handle" has incompatible type "Union[str, bytes, + # Union[IO[Any], RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper, mmap]]"; + # expected "Union[PathLike[str], Union[str, Union[IO[Any], RawIOBase, + # BufferedIOBase, TextIOBase, TextIOWrapper, mmap]]]" + with get_handle( + obj, "r", encoding=encoding # type: ignore[arg-type] + ) as handles: + text = handles.handle.read() elif isinstance(obj, (str, bytes)): text = obj - try: - if os.path.isfile(text): - with open(text, "rb") as f: - return f.read() - except (TypeError, ValueError): - pass else: raise TypeError(f"Cannot read object of type '{type(obj).__name__}'") - return text + # error: Incompatible return value type (got "Union[Any, bytes, None, str]", + # expected "Union[str, bytes]") + return text # type: ignore[return-value] class _HtmlFrameParser: @@ -204,7 +209,14 @@ class _HtmlFrameParser: functionality. """ - def __init__(self, io, match, attrs, encoding, displayed_only): + def __init__( + self, + io: FilePathOrBuffer, + match: str | Pattern, + attrs: dict[str, str] | None, + encoding: str, + displayed_only: bool, + ): self.io = io self.match = match self.attrs = attrs @@ -590,7 +602,7 @@ def _parse_tfoot_tr(self, table): return table.select("tfoot tr") def _setup_build_doc(self): - raw_text = _read(self.io) + raw_text = _read(self.io, self.encoding) if not raw_text: raise ValueError(f"No text parsed from document: {self.io}") return raw_text @@ -653,9 +665,6 @@ class _LxmlFrameParser(_HtmlFrameParser): :class:`_HtmlFrameParser`. 
""" - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - def _text_getter(self, obj): return obj.text_content() @@ -818,7 +827,7 @@ def _data_to_frame(**kwargs): } -def _parser_dispatch(flavor): +def _parser_dispatch(flavor: str | None) -> type[_HtmlFrameParser]: """ Choose the parser based on the input flavor. diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 1363a0b04ee0a..f6ae5ebfdf526 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -1233,6 +1233,10 @@ def seek(self, offset): def seekable(self): return True + def __iter__(self): + # to fool `is_file_like`, should never end up here + assert False + good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>") bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
and in io/feather_format.py
https://api.github.com/repos/pandas-dev/pandas/pulls/43958
2021-10-10T17:38:21Z
2021-10-17T21:50:06Z
2021-10-17T21:50:05Z
2021-10-17T21:50:10Z
DOC: cancel replace's doc list item text bold effect
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index a3fa24c7ee1e0..7d89f2e8b7789 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -414,51 +414,51 @@ * numeric, str or regex: - numeric: numeric values equal to `to_replace` will be - replaced with `value` + replaced with `value` - str: string exactly matching `to_replace` will be replaced - with `value` + with `value` - regex: regexs matching `to_replace` will be replaced with - `value` + `value` * list of str, regex, or numeric: - First, if `to_replace` and `value` are both lists, they - **must** be the same length. + **must** be the same length. - Second, if ``regex=True`` then all of the strings in **both** - lists will be interpreted as regexs otherwise they will match - directly. This doesn't matter much for `value` since there - are only a few possible substitution regexes you can use. + lists will be interpreted as regexs otherwise they will match + directly. This doesn't matter much for `value` since there + are only a few possible substitution regexes you can use. - str, regex and numeric rules apply as above. * dict: - Dicts can be used to specify different replacement values - for different existing values. For example, - ``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and - 'y' with 'z'. To use a dict in this way the `value` - parameter should be `None`. + for different existing values. For example, + ``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and + 'y' with 'z'. To use a dict in this way the `value` + parameter should be `None`. - For a DataFrame a dict can specify that different values - should be replaced in different columns. For example, - ``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a' - and the value 'z' in column 'b' and replaces these values - with whatever is specified in `value`. The `value` parameter - should not be ``None`` in this case. 
You can treat this as a - special case of passing two lists except that you are - specifying the column to search in. + should be replaced in different columns. For example, + ``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a' + and the value 'z' in column 'b' and replaces these values + with whatever is specified in `value`. The `value` parameter + should not be ``None`` in this case. You can treat this as a + special case of passing two lists except that you are + specifying the column to search in. - For a DataFrame nested dictionaries, e.g., - ``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column - 'a' for the value 'b' and replace it with NaN. The `value` - parameter should be ``None`` to use a nested dict in this - way. You can nest regular expressions as well. Note that - column names (the top-level dictionary keys in a nested - dictionary) **cannot** be regular expressions. + ``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column + 'a' for the value 'b' and replace it with NaN. The `value` + parameter should be ``None`` to use a nested dict in this + way. You can nest regular expressions as well. Note that + column names (the top-level dictionary keys in a nested + dictionary) **cannot** be regular expressions. * None: - This means that the `regex` argument must be a string, - compiled regular expression, or list, dict, ndarray or - Series of such elements. If `value` is also ``None`` then - this **must** be a nested dictionary or Series. + compiled regular expression, or list, dict, ndarray or + Series of such elements. If `value` is also ``None`` then + this **must** be a nested dictionary or Series. See the examples section for examples of each of these. 
value : scalar, dict, list, str, regex, default None @@ -497,17 +497,17 @@ TypeError * If `to_replace` is not a scalar, array-like, ``dict``, or ``None`` * If `to_replace` is a ``dict`` and `value` is not a ``list``, - ``dict``, ``ndarray``, or ``Series`` + ``dict``, ``ndarray``, or ``Series`` * If `to_replace` is ``None`` and `regex` is not compilable - into a regular expression or is a list, dict, ndarray, or - Series. + into a regular expression or is a list, dict, ndarray, or + Series. * When replacing multiple ``bool`` or ``datetime64`` objects and - the arguments to `to_replace` does not match the type of the - value being replaced + the arguments to `to_replace` does not match the type of the + value being replaced ValueError * If a ``list`` or an ``ndarray`` is passed to `to_replace` and - `value` but they are not the same length. + `value` but they are not the same length. See Also -------- @@ -518,17 +518,17 @@ Notes ----- * Regex substitution is performed under the hood with ``re.sub``. The - rules for substitution for ``re.sub`` are the same. + rules for substitution for ``re.sub`` are the same. * Regular expressions will only substitute on strings, meaning you - cannot provide, for example, a regular expression matching floating - point numbers and expect the columns in your frame that have a - numeric dtype to be matched. However, if those floating point - numbers *are* strings, then you can do this. + cannot provide, for example, a regular expression matching floating + point numbers and expect the columns in your frame that have a + numeric dtype to be matched. However, if those floating point + numbers *are* strings, then you can do this. * This method has *a lot* of options. You are encouraged to experiment - and play with this method to gain intuition about how it works. + and play with this method to gain intuition about how it works. 
* When dict is used as the `to_replace` value, it is like - key(s) in the dict are the to_replace part and - value(s) in the dict are the value parameter. + key(s) in the dict are the to_replace part and + value(s) in the dict are the value parameter. Examples --------
- [x] closes #43937 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] Cancel replace's doc list item text bold effect, detail could see below. From ![image](https://user-images.githubusercontent.com/25895405/136691591-6aae6597-99e7-4ae4-bca6-1e7380b1cad4.png) To ![image](https://user-images.githubusercontent.com/25895405/136691594-c2b14ad8-c761-49c9-9ba3-b1a472280df7.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/43955
2021-10-10T10:17:20Z
2021-10-10T18:12:43Z
2021-10-10T18:12:42Z
2021-10-10T18:12:46Z
PERF: Index.insert
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c002832cd89bb..d7a8a2e3d5f87 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6330,13 +6330,24 @@ def insert(self, loc: int, item) -> Index: dtype = self._find_common_type_compat(item) return self.astype(dtype).insert(loc, item) - arr = np.asarray(self) + arr = self._values + + if arr.dtype != object or not isinstance( + item, (tuple, np.datetime64, np.timedelta64) + ): + # with object-dtype we need to worry about numpy incorrectly casting + # dt64/td64 to integer, also about treating tuples as sequences + # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550 + casted = arr.dtype.type(item) + new_values = np.insert(arr, loc, casted) + + else: + new_values = np.insert(arr, loc, None) + new_values[loc] = item - # Use constructor to ensure we get tuples cast correctly. # Use self._constructor instead of Index to retain NumericIndex GH#43921 - item = self._constructor([item], dtype=self.dtype)._values - idx = np.concatenate((arr[:loc], item, arr[loc:])) - return self._constructor._with_infer(idx, name=self.name) + # TODO(2.0) can use Index instead of self._constructor + return self._constructor._with_infer(new_values, name=self.name) def drop(self, labels, errors: str_t = "raise") -> Index: """
``` import pandas as pd idx = pd.Index([4, 5, 6]) %timeit idx.insert(1, 19) 26.7 µs ± 299 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each) # <- master 15.5 µs ± 185 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) # <- PR ```
https://api.github.com/repos/pandas-dev/pandas/pulls/43953
2021-10-10T04:03:53Z
2021-10-10T21:41:14Z
2021-10-10T21:41:14Z
2021-10-10T21:51:37Z
REF: share RangeIndex methods
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index da953fe46ef1d..1c1bc356ef816 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1027,7 +1027,8 @@ def take( taken = algos.take( self._values, indices, allow_fill=allow_fill, fill_value=self._na_value ) - return type(self)._simple_new(taken, name=self.name) + # _constructor so RangeIndex->Int64Index + return self._constructor._simple_new(taken, name=self.name) @final def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: @@ -1097,7 +1098,8 @@ def repeat(self, repeats, axis=None): nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) - return type(self)._simple_new(res_values, name=self.name) + # _constructor so RangeIndex->Int64Index + return self._constructor._simple_new(res_values, name=self.name) # -------------------------------------------------------------------- # Copying Methods @@ -6298,7 +6300,8 @@ def delete(self: _IndexT, loc) -> _IndexT: Index(['b'], dtype='object') """ res_values = np.delete(self._data, loc) - return type(self)._simple_new(res_values, name=self.name) + # _constructor so RangeIndex->Int64Index + return self._constructor._simple_new(res_values, name=self.name) def insert(self, loc: int, item) -> Index: """ diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 51d9f15390789..4003165a7ddc6 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -26,7 +26,6 @@ cache_readonly, doc, ) -from pandas.util._exceptions import rewrite_exception from pandas.core.dtypes.common import ( ensure_platform_int, @@ -189,17 +188,6 @@ def _data(self) -> np.ndarray: """ return np.arange(self.start, self.stop, self.step, dtype=np.int64) - @cache_readonly - def _cached_int64index(self) -> Int64Index: - return Int64Index._simple_new(self._data, name=self.name) - - @property - def _int64index(self) -> Int64Index: - # wrap _cached_int64index so we can be sure its 
name matches self.name - res = self._cached_int64index - res._name = self._name - return res - def _get_data_as_items(self): """return a list of tuples of start, stop, step""" rng = self._range @@ -425,24 +413,6 @@ def _get_indexer( # -------------------------------------------------------------------- - def repeat(self, repeats, axis=None) -> Int64Index: - return self._int64index.repeat(repeats, axis=axis) - - def delete(self, loc) -> Int64Index: # type: ignore[override] - return self._int64index.delete(loc) - - def take( - self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs - ) -> Int64Index: - with rewrite_exception("Int64Index", type(self).__name__): - return self._int64index.take( - indices, - axis=axis, - allow_fill=allow_fill, - fill_value=fill_value, - **kwargs, - ) - def tolist(self) -> list[int]: return list(self._range) @@ -683,7 +653,8 @@ def _union(self, other: Index, sort): and (end_s - step_o <= end_o) ): return type(self)(start_r, end_r + step_o, step_o) - return self._int64index._union(other, sort=sort) + + return super()._union(other, sort=sort) def _difference(self, other, sort=None): # optimized set operation if we have another RangeIndex @@ -857,7 +828,8 @@ def __floordiv__(self, other): start = self.start // other new_range = range(start, start + 1, 1) return self._simple_new(new_range, name=self.name) - return self._int64index // other + + return super().__floordiv__(other) # -------------------------------------------------------------------- # Reductions @@ -891,21 +863,22 @@ def _arith_method(self, other, op): elif isinstance(other, (timedelta, np.timedelta64)): # GH#19333 is_integer evaluated True on timedelta64, # so we need to catch these explicitly - return op(self._int64index, other) + return super()._arith_method(other, op) elif is_timedelta64_dtype(other): # Must be an np.ndarray; GH#22390 - return op(self._int64index, other) + return super()._arith_method(other, op) if op in [ operator.pow, ops.rpow, 
operator.mod, ops.rmod, + operator.floordiv, ops.rfloordiv, divmod, ops.rdivmod, ]: - return op(self._int64index, other) + return super()._arith_method(other, op) step: Callable | None = None if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]: @@ -946,5 +919,5 @@ def _arith_method(self, other, op): except (ValueError, TypeError, ZeroDivisionError): # Defer to Int64Index implementation - return op(self._int64index, other) - # TODO: Do attrs get handled reliably? + # test_arithmetic_explicit_conversions + return super()._arith_method(other, op) diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 2cdc35bdf51cb..f06e5e9bdc93b 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -1311,18 +1311,22 @@ def test_numeric_compat2(self): # __pow__ idx = RangeIndex(0, 1000, 2) result = idx ** 2 - expected = idx._int64index ** 2 + expected = Int64Index(idx._values) ** 2 tm.assert_index_equal(Index(result.values), expected, exact=True) # __floordiv__ cases_exact = [ (RangeIndex(0, 1000, 2), 2, RangeIndex(0, 500, 1)), (RangeIndex(-99, -201, -3), -3, RangeIndex(33, 67, 1)), - (RangeIndex(0, 1000, 1), 2, RangeIndex(0, 1000, 1)._int64index // 2), + ( + RangeIndex(0, 1000, 1), + 2, + Int64Index(RangeIndex(0, 1000, 1)._values) // 2, + ), ( RangeIndex(0, 100, 1), 2.0, - RangeIndex(0, 100, 1)._int64index // 2.0, + Int64Index(RangeIndex(0, 100, 1)._values) // 2.0, ), (RangeIndex(0), 50, RangeIndex(0)), (RangeIndex(2, 4, 2), 3, RangeIndex(0, 1, 1)), diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index 7dcdb627b9abb..e064974f0e006 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -299,12 +299,12 @@ def test_identical(self, simple_index): def test_nbytes(self): # memory savings vs int index - i = RangeIndex(0, 1000) - assert i.nbytes < i._int64index.nbytes / 10 + idx = 
RangeIndex(0, 1000) + assert idx.nbytes < Int64Index(idx.values).nbytes / 10 # constant memory usage i2 = RangeIndex(0, 10) - assert i.nbytes == i2.nbytes + assert idx.nbytes == i2.nbytes @pytest.mark.parametrize( "start,stop,step", diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py index 6dc47b7fef5ac..71fd5396b850b 100644 --- a/pandas/tests/indexes/ranges/test_setops.py +++ b/pandas/tests/indexes/ranges/test_setops.py @@ -286,7 +286,7 @@ def test_union_sorted(self, unions): tm.assert_index_equal(res1, expected_notsorted, exact=True) res2 = idx2.union(idx1, sort=None) - res3 = idx1._int64index.union(idx2, sort=None) + res3 = Int64Index(idx1._values, name=idx1.name).union(idx2, sort=None) tm.assert_index_equal(res2, expected_sorted, exact=True) tm.assert_index_equal(res3, expected_sorted) @@ -322,11 +322,11 @@ def test_difference_mismatched_step(self): obj = RangeIndex.from_range(range(1, 10), name="foo") result = obj.difference(obj[::2]) - expected = obj[1::2]._int64index + expected = Int64Index(obj[1::2]._values, name=obj.name) tm.assert_index_equal(result, expected, exact=True) result = obj.difference(obj[1::2]) - expected = obj[::2]._int64index + expected = Int64Index(obj[::2]._values, name=obj.name) tm.assert_index_equal(result, expected, exact=True) def test_symmetric_difference(self): diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index 62aae33134f60..a7f6c47db916d 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -221,7 +221,7 @@ class TestIndexReductions: def test_max_min_range(self, start, stop, step): # GH#17607 idx = RangeIndex(start, stop, step) - expected = idx._int64index.max() + expected = idx._values.max() result = idx.max() assert result == expected @@ -229,7 +229,7 @@ def test_max_min_range(self, start, stop, step): result2 = idx.max(skipna=False) assert result2 == expected - expected = 
idx._int64index.min() + expected = idx._values.min() result = idx.min() assert result == expected @@ -431,13 +431,11 @@ def test_numpy_minmax_range(self): # GH#26125 idx = RangeIndex(0, 10, 3) - expected = idx._int64index.max() result = np.max(idx) - assert result == expected + assert result == 9 - expected = idx._int64index.min() result = np.min(idx) - assert result == expected + assert result == 0 errmsg = "the 'out' parameter is not supported" with pytest.raises(ValueError, match=errmsg):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/43952
2021-10-10T03:42:18Z
2021-10-10T17:20:56Z
2021-10-10T17:20:56Z
2021-10-10T18:04:09Z
TYP: Use Protocols for file-like objects in read/to_*
diff --git a/pandas/_testing/_io.py b/pandas/_testing/_io.py index c7113e663789b..437e75be0e55b 100644 --- a/pandas/_testing/_io.py +++ b/pandas/_testing/_io.py @@ -10,7 +10,10 @@ ) import zipfile -from pandas._typing import FilePathOrBuffer +from pandas._typing import ( + FilePath, + ReadPickleBuffer, +) from pandas.compat import ( get_lzma_file, import_lzma, @@ -277,7 +280,7 @@ def can_connect(url, error_classes=None): def round_trip_pickle( - obj: Any, path: FilePathOrBuffer | None = None + obj: Any, path: FilePath | ReadPickleBuffer | None = None ) -> DataFrame | Series: """ Pickle an object and then read it again. diff --git a/pandas/_typing.py b/pandas/_typing.py index 85e29681285f4..89e1c0bf7a71f 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -1,28 +1,24 @@ +from __future__ import annotations + from datetime import ( datetime, timedelta, tzinfo, ) -from io import ( - BufferedIOBase, - RawIOBase, - TextIOBase, -) -from mmap import mmap from os import PathLike from typing import ( - IO, TYPE_CHECKING, Any, - AnyStr, Callable, Collection, Dict, Hashable, + Iterator, List, Literal, Mapping, Optional, + Protocol, Sequence, Tuple, Type as type_t, @@ -169,9 +165,76 @@ PythonFuncType = Callable[[Any], Any] # filenames and file-like-objects -Buffer = Union[IO[AnyStr], RawIOBase, BufferedIOBase, TextIOBase, mmap] -FileOrBuffer = Union[str, Buffer[AnyStr]] -FilePathOrBuffer = Union["PathLike[str]", FileOrBuffer[AnyStr]] +AnyStr_cov = TypeVar("AnyStr_cov", str, bytes, covariant=True) +AnyStr_con = TypeVar("AnyStr_con", str, bytes, contravariant=True) + + +class BaseBuffer(Protocol): + @property + def mode(self) -> str: + # for _get_filepath_or_buffer + ... + + def fileno(self) -> int: + # for _MMapWrapper + ... + + def seek(self, __offset: int, __whence: int = ...) -> int: + # with one argument: gzip.GzipFile, bz2.BZ2File + # with two arguments: zip.ZipFile, read_sas + ... + + def seekable(self) -> bool: + # for bz2.BZ2File + ... 
+ + def tell(self) -> int: + # for zip.ZipFile, read_stata, to_stata + ... + + +class ReadBuffer(BaseBuffer, Protocol[AnyStr_cov]): + def read(self, __n: int | None = ...) -> AnyStr_cov: + # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File + ... + + +class WriteBuffer(BaseBuffer, Protocol[AnyStr_con]): + def write(self, __b: AnyStr_con) -> Any: + # for gzip.GzipFile, bz2.BZ2File + ... + + def flush(self) -> Any: + # for gzip.GzipFile, bz2.BZ2File + ... + + +class ReadPickleBuffer(ReadBuffer[bytes], Protocol): + def readline(self) -> AnyStr_cov: + ... + + +class WriteExcelBuffer(WriteBuffer[bytes], Protocol): + def truncate(self, size: int | None = ...) -> int: + ... + + +class ReadCsvBuffer(ReadBuffer[AnyStr_cov], Protocol): + def __iter__(self) -> Iterator[AnyStr_cov]: + # for engine=python + ... + + def readline(self) -> AnyStr_cov: + # for engine=python + ... + + @property + def closed(self) -> bool: + # for enine=pyarrow + ... + + +FilePath = Union[str, "PathLike[str]"] # for arbitrary kwargs passed during reading/writing files StorageOptions = Optional[Dict[str, Any]] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b88c97b8e988d..0d1b8e995f18c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -16,7 +16,6 @@ import functools from io import StringIO import itertools -import mmap from textwrap import dedent from typing import ( IO, @@ -55,7 +54,7 @@ CompressionOptions, Dtype, DtypeObj, - FilePathOrBuffer, + FilePath, FillnaOptions, FloatFormatType, FormattersType, @@ -71,6 +70,7 @@ TimedeltaConvertibleTypes, TimestampConvertibleTypes, ValueKeyFunc, + WriteBuffer, npt, ) from pandas.compat._optional import import_optional_dependency @@ -1056,7 +1056,7 @@ def _repr_html_(self) -> str | None: @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_string( self, - buf: FilePathOrBuffer[str] | None = None, + buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | 
None = None, header: bool | Sequence[str] = True, @@ -2432,7 +2432,7 @@ def _from_arrays( @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_stata( self, - path: FilePathOrBuffer, + path: FilePath | WriteBuffer[bytes], convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, @@ -2454,11 +2454,9 @@ def to_stata( Parameters ---------- - path : str, buffer or path object - String, path object (pathlib.Path or py._path.local.LocalPath) or - object implementing a binary write() function. If using a buffer - then the buffer will not be automatically closed after the file - data has been written. + path : str, path object, or buffer + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. .. versionchanged:: 1.0.0 @@ -2600,14 +2598,16 @@ def to_stata( writer.write_file() @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") - def to_feather(self, path: FilePathOrBuffer[bytes], **kwargs) -> None: + def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- - path : str or file-like object - If a string, it will be used as Root Directory path. + path : str, path object, file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. If a string or a path, + it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. 
Starting with pyarrow 0.17, this includes the `compression`, @@ -2677,15 +2677,14 @@ def to_markdown( return result with get_handle(buf, mode, storage_options=storage_options) as handles: - assert not isinstance(handles.handle, (str, mmap.mmap)) - handles.handle.writelines(result) + handles.handle.write(result) return None @doc(storage_options=generic._shared_docs["storage_options"]) @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_parquet( self, - path: FilePathOrBuffer | None = None, + path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, @@ -2703,13 +2702,11 @@ def to_parquet( Parameters ---------- - path : str or file-like object, default None - If a string, it will be used as Root Directory path - when writing a partitioned dataset. By file-like object, - we refer to objects with a write() method, such as a file handle - (e.g. via builtin open function) or io.BytesIO. The engine - fastparquet does not accept file-like objects. If path is None, - a bytes object is returned. + path : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. If None, the result is + returned as bytes. If a string or path, it will be used as Root Directory + path when writing a partitioned dataset. .. 
versionchanged:: 1.2.0 @@ -2804,7 +2801,7 @@ def to_parquet( @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_html( self, - buf: FilePathOrBuffer[str] | None = None, + buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, @@ -2891,7 +2888,7 @@ def to_html( @doc(storage_options=generic._shared_docs["storage_options"]) def to_xml( self, - path_or_buffer: FilePathOrBuffer | None = None, + path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", @@ -2904,7 +2901,7 @@ def to_xml( xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", - stylesheet: FilePathOrBuffer | None = None, + stylesheet: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: @@ -2915,9 +2912,10 @@ def to_xml( Parameters ---------- - path_or_buffer : str, path object or file-like object, optional - File to write output to. If None, the output is returned as a - string. + path_or_buffer : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a ``write()`` function. If None, the result is returned + as a string. index : bool, default True Whether to include index in XML document. 
root_name : str, default 'data' @@ -3211,7 +3209,7 @@ def to_xml( def info( self, verbose: bool | None = None, - buf: IO[str] | None = None, + buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fd8af2c0cedd0..0945193673107 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -12,7 +12,6 @@ from typing import ( TYPE_CHECKING, Any, - AnyStr, Callable, Hashable, Literal, @@ -44,7 +43,7 @@ Dtype, DtypeArg, DtypeObj, - FilePathOrBuffer, + FilePath, IndexKeyFunc, IndexLabel, JSONSerializable, @@ -58,6 +57,7 @@ TimedeltaConvertibleTypes, TimestampConvertibleTypes, ValueKeyFunc, + WriteBuffer, npt, ) from pandas.compat._optional import import_optional_dependency @@ -2332,7 +2332,7 @@ def to_excel( @doc(storage_options=_shared_docs["storage_options"]) def to_json( self, - path_or_buf: FilePathOrBuffer | None = None, + path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, orient: str | None = None, date_format: str | None = None, double_precision: int = 10, @@ -2353,9 +2353,10 @@ def to_json( Parameters ---------- - path_or_buf : str or file handle, optional - File path or object. If not specified, the result is returned as - a string. + path_or_buf : str, path object, file-like object, or None, default None + String, path object (implementing os.PathLike[str]), or file-like + object implementing a write() function. If None, the result is + returned as a string. orient : str Indication of expected JSON string format. 
@@ -3337,7 +3338,7 @@ def to_latex( @doc(storage_options=_shared_docs["storage_options"]) def to_csv( self, - path_or_buf: FilePathOrBuffer[AnyStr] | None = None, + path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, sep: str = ",", na_rep: str = "", float_format: str | None = None, @@ -3364,10 +3365,11 @@ def to_csv( Parameters ---------- - path_or_buf : str or file handle, default None - File path or object, if None is provided the result is returned as - a string. If a non-binary file object is passed, it should be opened - with `newline=''`, disabling universal newlines. If a binary + path_or_buf : str, path object, file-like object, or None, default None + String, path object (implementing os.PathLike[str]), or file-like + object implementing a write() function. If None, the result is + returned as a string. If a non-binary file object is passed, it should + be opened with `newline=''`, disabling universal newlines. If a binary file object is passed, `mode` might need to contain a `'b'`. .. versionchanged:: 1.2.0 diff --git a/pandas/io/common.py b/pandas/io/common.py index 1e928d1f2cd9e..fa94319c75fa9 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -26,6 +26,7 @@ Generic, Literal, Mapping, + TypeVar, cast, overload, ) @@ -40,12 +41,13 @@ import zipfile from pandas._typing import ( - Buffer, + BaseBuffer, CompressionDict, CompressionOptions, - FileOrBuffer, - FilePathOrBuffer, + FilePath, + ReadBuffer, StorageOptions, + WriteBuffer, ) from pandas.compat import ( get_lzma_file, @@ -61,19 +63,16 @@ _VALID_URLS = set(uses_relative + uses_netloc + uses_params) _VALID_URLS.discard("") +BaseBufferT = TypeVar("BaseBufferT", bound=BaseBuffer) + @dataclasses.dataclass class IOArgs: """ Return value of io/common.py:_get_filepath_or_buffer. - - Note (copy&past from io/parsers): - filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile] - though mypy handling of conditional imports is difficult. 
- See https://github.com/python/mypy/issues/1297 """ - filepath_or_buffer: FileOrBuffer + filepath_or_buffer: str | BaseBuffer encoding: str mode: str compression: CompressionDict @@ -95,9 +94,10 @@ class IOHandles(Generic[AnyStr]): is_wrapped: Whether a TextIOWrapper needs to be detached. """ - handle: Buffer[AnyStr] + # handle might not implement the IO-interface + handle: IO[AnyStr] compression: CompressionDict - created_handles: list[Buffer] = dataclasses.field(default_factory=list) + created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list) is_wrapped: bool = False is_mmap: bool = False @@ -128,7 +128,7 @@ def __exit__(self, *args: Any) -> None: self.close() -def is_url(url) -> bool: +def is_url(url: object) -> bool: """ Check to see if a URL has a valid protocol. @@ -146,7 +146,17 @@ def is_url(url) -> bool: return parse_url(url).scheme in _VALID_URLS -def _expand_user(filepath_or_buffer: FileOrBuffer[AnyStr]) -> FileOrBuffer[AnyStr]: +@overload +def _expand_user(filepath_or_buffer: str) -> str: + ... + + +@overload +def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT: + ... + + +def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT: """ Return the argument with an initial component of ~ or ~user replaced by that user's home directory. @@ -174,10 +184,22 @@ def validate_header_arg(header) -> None: ) +@overload +def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str: + ... + + +@overload def stringify_path( - filepath_or_buffer: FilePathOrBuffer[AnyStr], + filepath_or_buffer: BaseBufferT, convert_file_like: bool = ... +) -> BaseBufferT: + ... + + +def stringify_path( + filepath_or_buffer: FilePath | BaseBufferT, convert_file_like: bool = False, -) -> FileOrBuffer[AnyStr]: +) -> str | BaseBufferT: """ Attempt to convert a path-like object to a string. 
@@ -201,7 +223,7 @@ def stringify_path( # GH 38125: some fsspec objects implement os.PathLike but have already opened a # file. This prevents opening the file a second time. infer_compression calls # this function with convert_file_like=True to infer the compression. - return cast(FileOrBuffer[AnyStr], filepath_or_buffer) + return cast(BaseBufferT, filepath_or_buffer) if isinstance(filepath_or_buffer, os.PathLike): filepath_or_buffer = filepath_or_buffer.__fspath__() @@ -218,7 +240,7 @@ def urlopen(*args, **kwargs): return urllib.request.urlopen(*args, **kwargs) -def is_fsspec_url(url: FilePathOrBuffer) -> bool: +def is_fsspec_url(url: FilePath | BaseBuffer) -> bool: """ Returns true if the given URL looks like something fsspec can handle @@ -231,7 +253,7 @@ def is_fsspec_url(url: FilePathOrBuffer) -> bool: def _get_filepath_or_buffer( - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | BaseBuffer, encoding: str = "utf-8", compression: CompressionOptions = None, mode: str = "r", @@ -393,7 +415,11 @@ def _get_filepath_or_buffer( mode=mode, ) - if not is_file_like(filepath_or_buffer): + # is_file_like requires (read | write) & __iter__ but __iter__ is only + # needed for read_csv(engine=python) + if not ( + hasattr(filepath_or_buffer, "read") or hasattr(filepath_or_buffer, "write") + ): msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}" raise ValueError(msg) @@ -463,7 +489,7 @@ def get_compression_method( def infer_compression( - filepath_or_buffer: FilePathOrBuffer, compression: str | None + filepath_or_buffer: FilePath | BaseBuffer, compression: str | None ) -> str | None: """ Get the compression method for filepath_or_buffer. 
If compression='infer', @@ -538,7 +564,7 @@ def check_parent_directory(path: Path | str) -> None: @overload def get_handle( - path_or_buf: FilePathOrBuffer, + path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = ..., @@ -553,7 +579,7 @@ def get_handle( @overload def get_handle( - path_or_buf: FilePathOrBuffer, + path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = ..., @@ -567,7 +593,7 @@ def get_handle( def get_handle( - path_or_buf: FilePathOrBuffer, + path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = None, @@ -649,7 +675,7 @@ def get_handle( ) handle = ioargs.filepath_or_buffer - handles: list[Buffer] + handles: list[BaseBuffer] # memory mapping needs to be the first step handle, memory_map, handles = _maybe_memory_map( @@ -677,17 +703,18 @@ def get_handle( if compression == "gzip": if is_path: assert isinstance(handle, str) - handle = gzip.GzipFile( + # error: Incompatible types in assignment (expression has type + # "GzipFile", variable has type "Union[str, BaseBuffer]") + handle = gzip.GzipFile( # type: ignore[assignment] filename=handle, mode=ioargs.mode, **compression_args, ) else: handle = gzip.GzipFile( - # error: Argument "fileobj" to "GzipFile" has incompatible type - # "Union[str, Union[IO[Any], RawIOBase, BufferedIOBase, TextIOBase, - # TextIOWrapper, mmap]]"; expected "Optional[IO[bytes]]" - fileobj=handle, # type: ignore[arg-type] + # No overload variant of "GzipFile" matches argument types + # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" + fileobj=handle, # type: ignore[call-overload] mode=ioargs.mode, **compression_args, ) @@ -706,7 +733,12 @@ def get_handle( # ZIP Compression elif compression == "zip": - handle = _BytesZipFile(handle, ioargs.mode, **compression_args) + # error: Argument 1 to "_BytesZipFile" has incompatible type "Union[str, + # BaseBuffer]"; expected "Union[Union[str, PathLike[str]], + # ReadBuffer[bytes], WriteBuffer[bytes]]" + handle = _BytesZipFile( + handle, 
ioargs.mode, **compression_args # type: ignore[arg-type] + ) if handle.mode == "r": handles.append(handle) zip_names = handle.namelist() @@ -787,10 +819,14 @@ def get_handle( assert not isinstance(ioargs.filepath_or_buffer, str) handles.append(ioargs.filepath_or_buffer) - assert not isinstance(handle, str) return IOHandles( - handle=handle, - created_handles=handles, + # error: Argument "handle" to "IOHandles" has incompatible type + # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes], + # typing.IO[Any]]"; expected "pandas._typing.IO[Any]" + handle=handle, # type: ignore[arg-type] + # error: Argument "created_handles" to "IOHandles" has incompatible type + # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]" + created_handles=handles, # type: ignore[arg-type] is_wrapped=is_wrapped, is_mmap=memory_map, compression=ioargs.compression, @@ -821,7 +857,7 @@ class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore[misc] # GH 17778 def __init__( self, - file: FilePathOrBuffer, + file: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], mode: str, archive_name: str | None = None, **kwargs, @@ -974,15 +1010,15 @@ def detach(self): def _maybe_memory_map( - handle: FileOrBuffer, + handle: str | BaseBuffer, memory_map: bool, encoding: str, mode: str, errors: str | None, decode: bool, -) -> tuple[FileOrBuffer, bool, list[Buffer]]: +) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]: """Try to memory map file/buffer.""" - handles: list[Buffer] = [] + handles: list[BaseBuffer] = [] memory_map &= hasattr(handle, "fileno") or isinstance(handle, str) if not memory_map: return handle, memory_map, handles @@ -1001,10 +1037,11 @@ def _maybe_memory_map( # error: Argument 1 to "_MMapWrapper" has incompatible type "Union[IO[Any], # RawIOBase, BufferedIOBase, TextIOBase, mmap]"; expected "IO[Any]" wrapped = cast( - mmap.mmap, + BaseBuffer, _MMapWrapper(handle, encoding, errors, decode), # type: ignore[arg-type] ) - handle.close() + # error: "BaseBuffer" has no 
attribute "close" + handle.close() # type: ignore[attr-defined] handles.remove(handle) handles.append(wrapped) handle = wrapped @@ -1018,7 +1055,7 @@ def _maybe_memory_map( return handle, memory_map, handles -def file_exists(filepath_or_buffer: FilePathOrBuffer) -> bool: +def file_exists(filepath_or_buffer: FilePath | BaseBuffer) -> bool: """Test whether file exists.""" exists = False filepath_or_buffer = stringify_path(filepath_or_buffer) @@ -1032,7 +1069,7 @@ def file_exists(filepath_or_buffer: FilePathOrBuffer) -> bool: return exists -def _is_binary_mode(handle: FilePathOrBuffer, mode: str) -> bool: +def _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool: """Whether the handle is opened in binary mode""" # specified by user if "t" in mode or "b" in mode: diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 22fbaaaa8b2f8..04052b0fe9fdf 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -6,6 +6,7 @@ import os from textwrap import fill from typing import ( + IO, Any, Mapping, cast, @@ -17,10 +18,11 @@ from pandas._libs.parsers import STR_NA_VALUES from pandas._typing import ( - Buffer, DtypeArg, - FilePathOrBuffer, + FilePath, + ReadBuffer, StorageOptions, + WriteExcelBuffer, ) from pandas.compat._optional import ( get_version, @@ -816,7 +818,7 @@ class ExcelWriter(metaclass=abc.ABCMeta): # ExcelWriter. 
def __new__( cls, - path: FilePathOrBuffer | ExcelWriter, + path: FilePath | WriteExcelBuffer | ExcelWriter, engine=None, date_format=None, datetime_format=None, @@ -918,7 +920,7 @@ def save(self): def __init__( self, - path: FilePathOrBuffer | ExcelWriter, + path: FilePath | WriteExcelBuffer | ExcelWriter, engine=None, date_format=None, datetime_format=None, @@ -942,7 +944,7 @@ def __init__( # cast ExcelWriter to avoid adding 'if self.handles is not None' self.handles = IOHandles( - cast(Buffer[bytes], path), compression={"copression": None} + cast(IO[bytes], path), compression={"copression": None} ) if not isinstance(path, ExcelWriter): self.handles = get_handle( @@ -1061,7 +1063,7 @@ def close(self): @doc(storage_options=_shared_docs["storage_options"]) def inspect_excel_format( - content_or_path: FilePathOrBuffer, + content_or_path: FilePath | ReadBuffer[bytes], storage_options: StorageOptions = None, ) -> str | None: """ @@ -1108,9 +1110,7 @@ def inspect_excel_format( elif not peek.startswith(ZIP_SIGNATURE): return None - # ZipFile typing is overly-strict - # https://github.com/python/typeshed/issues/4212 - zf = zipfile.ZipFile(stream) # type: ignore[arg-type] + zf = zipfile.ZipFile(stream) # Workaround for some third party files that use forward slashes and # lower case names. 
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index e0c5a2c6a7ff9..952ad72b480b7 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -3,7 +3,8 @@ import numpy as np from pandas._typing import ( - FilePathOrBuffer, + FilePath, + ReadBuffer, Scalar, StorageOptions, ) @@ -28,7 +29,7 @@ class ODFReader(BaseExcelReader): def __init__( self, - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions = None, ): import_optional_dependency("odf") @@ -40,7 +41,7 @@ def _workbook_class(self): return OpenDocument - def load_workbook(self, filepath_or_buffer: FilePathOrBuffer): + def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]): from odf.opendocument import load return load(filepath_or_buffer) diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index d499f1a5ea89f..f34bf311e6ce7 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -9,7 +9,8 @@ import numpy as np from pandas._typing import ( - FilePathOrBuffer, + FilePath, + ReadBuffer, Scalar, StorageOptions, ) @@ -505,7 +506,7 @@ def write_cells( class OpenpyxlReader(BaseExcelReader): def __init__( self, - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions = None, ) -> None: """ @@ -527,7 +528,7 @@ def _workbook_class(self): return Workbook - def load_workbook(self, filepath_or_buffer: FilePathOrBuffer): + def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]): from openpyxl import load_workbook return load_workbook( diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py index 4b2b9f7a3a678..9284cf917a48c 100644 --- a/pandas/io/excel/_pyxlsb.py +++ b/pandas/io/excel/_pyxlsb.py @@ -2,7 +2,8 @@ from __future__ import annotations from pandas._typing import ( - FilePathOrBuffer, + FilePath, + ReadBuffer, Scalar, StorageOptions, ) @@ 
-14,7 +15,7 @@ class PyxlsbReader(BaseExcelReader): def __init__( self, - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions = None, ): """ @@ -38,7 +39,7 @@ def _workbook_class(self): return Workbook - def load_workbook(self, filepath_or_buffer: FilePathOrBuffer): + def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]): from pyxlsb import open_workbook # TODO: hack in buffer capability diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 145cbe182eadb..e4547b527a6b9 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -7,8 +7,10 @@ ) from pandas._typing import ( - FilePathOrBuffer, + FilePath, + ReadBuffer, StorageOptions, + WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import doc @@ -26,7 +28,7 @@ @doc(storage_options=generic._shared_docs["storage_options"]) def to_feather( df: DataFrame, - path: FilePathOrBuffer[bytes], + path: FilePath | WriteBuffer[bytes], storage_options: StorageOptions = None, **kwargs, ): @@ -36,7 +38,7 @@ def to_feather( Parameters ---------- df : DataFrame - path : string file path, or file-like object + path : str, path object, or file-like object {storage_options} .. versionadded:: 1.2.0 @@ -93,7 +95,7 @@ def to_feather( @doc(storage_options=generic._shared_docs["storage_options"]) def read_feather( - path: FilePathOrBuffer[bytes], + path: FilePath | ReadBuffer[bytes], columns: Sequence[Hashable] | None = None, use_threads: bool = True, storage_options: StorageOptions = None, @@ -103,18 +105,11 @@ def read_feather( Parameters ---------- - path : str, path object or file-like object - Any valid string path is acceptable. The string could be a URL. Valid - URL schemes include http, ftp, s3, and file. For file URLs, a host is - expected. A local file could be: - ``file://localhost/path/to/table.feather``. 
- - If you want to pass in a path object, pandas accepts any - ``os.PathLike``. - - By file-like object, we refer to objects with a ``read()`` method, - such as a file handle (e.g. via builtin ``open`` function) - or ``StringIO``. + path : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: ``file://localhost/path/to/table.feather``. columns : sequence, default None If not provided, all columns are read. use_threads : bool, default True diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index f078975e4b85a..18228a93b5285 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -20,10 +20,11 @@ from pandas._libs import writers as libwriters from pandas._typing import ( CompressionOptions, - FilePathOrBuffer, + FilePath, FloatFormatType, IndexLabel, StorageOptions, + WriteBuffer, ) from pandas.core.dtypes.generic import ( @@ -48,7 +49,7 @@ class CSVFormatter: def __init__( self, formatter: DataFrameFormatter, - path_or_buf: FilePathOrBuffer[str] | FilePathOrBuffer[bytes] = "", + path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] = "", sep: str = ",", cols: Sequence[Hashable] | None = None, index_label: IndexLabel | None = None, @@ -57,7 +58,7 @@ def __init__( errors: str = "strict", compression: CompressionOptions = "infer", quoting: int | None = None, - line_terminator="\n", + line_terminator: str | None = "\n", chunksize: int | None = None, quotechar: str | None = '"', date_format: str | None = None, @@ -245,7 +246,7 @@ def save(self) -> None: # Note: self.encoding is irrelevant here self.writer = csvlib.writer( - handles.handle, # type: ignore[arg-type] + handles.handle, lineterminator=self.line_terminator, delimiter=self.sep, quoting=self.quoting, diff --git 
a/pandas/io/formats/format.py b/pandas/io/formats/format.py index ca53bfb7d5e08..616331bf80a44 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -19,7 +19,6 @@ IO, TYPE_CHECKING, Any, - AnyStr, Callable, Hashable, Iterable, @@ -51,11 +50,12 @@ ColspaceArgType, ColspaceType, CompressionOptions, - FilePathOrBuffer, + FilePath, FloatFormatType, FormattersType, IndexLabel, StorageOptions, + WriteBuffer, ) from pandas.core.dtypes.common import ( @@ -1021,7 +1021,7 @@ def __init__(self, fmt: DataFrameFormatter): def to_latex( self, - buf: FilePathOrBuffer[str] | None = None, + buf: FilePath | WriteBuffer[str] | None = None, column_format: str | None = None, longtable: bool = False, encoding: str | None = None, @@ -1053,7 +1053,7 @@ def to_latex( def to_html( self, - buf: FilePathOrBuffer[str] | None = None, + buf: FilePath | WriteBuffer[str] | None = None, encoding: str | None = None, classes: str | list | tuple | None = None, notebook: bool = False, @@ -1066,8 +1066,10 @@ def to_html( Parameters ---------- - buf : str, Path or StringIO-like, optional, default None - Buffer to write to. If None, the output is returned as a string. + buf : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If None, the result is + returned as a string. encoding : str, default “utf-8” Set character encoding. classes : str or list-like @@ -1102,7 +1104,7 @@ def to_html( def to_string( self, - buf: FilePathOrBuffer[str] | None = None, + buf: FilePath | WriteBuffer[str] | None = None, encoding: str | None = None, line_width: int | None = None, ) -> str | None: @@ -1111,8 +1113,10 @@ def to_string( Parameters ---------- - buf : str, Path or StringIO-like, optional, default None - Buffer to write to. If None, the output is returned as a string. 
+ buf : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If None, the result is + returned as a string. encoding: str, default “utf-8” Set character encoding. line_width : int, optional @@ -1126,7 +1130,7 @@ def to_string( def to_csv( self, - path_or_buf: FilePathOrBuffer[AnyStr] | None = None, + path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, encoding: str | None = None, sep: str = ",", columns: Sequence[Hashable] | None = None, @@ -1186,7 +1190,7 @@ def to_csv( def save_to_buffer( string: str, - buf: FilePathOrBuffer[str] | None = None, + buf: FilePath | WriteBuffer[str] | None = None, encoding: str | None = None, ) -> str | None: """ @@ -1200,7 +1204,7 @@ def save_to_buffer( @contextmanager -def get_buffer(buf: FilePathOrBuffer[str] | None, encoding: str | None = None): +def get_buffer(buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None): """ Context manager to open, yield and close buffer for filenames or Path-like objects, otherwise yield buf unchanged. @@ -2142,7 +2146,7 @@ def get_level_lengths( return result -def buffer_put_lines(buf: IO[str], lines: list[str]) -> None: +def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None: """ Appends lines to a buffer. 
diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py index 64a59778a54f3..ddd2420731028 100644 --- a/pandas/io/formats/info.py +++ b/pandas/io/formats/info.py @@ -6,7 +6,6 @@ ) import sys from typing import ( - IO, TYPE_CHECKING, Iterable, Iterator, @@ -16,7 +15,10 @@ from pandas._config import get_option -from pandas._typing import Dtype +from pandas._typing import ( + Dtype, + WriteBuffer, +) from pandas.core.indexes.api import Index @@ -171,7 +173,7 @@ def size_qualifier(self) -> str: def render( self, *, - buf: IO[str] | None, + buf: WriteBuffer[str] | None, max_cols: int | None, verbose: bool | None, show_counts: bool | None, @@ -287,7 +289,7 @@ def memory_usage_bytes(self) -> int: def render( self, *, - buf: IO[str] | None, + buf: WriteBuffer[str] | None, max_cols: int | None, verbose: bool | None, show_counts: bool | None, @@ -306,7 +308,7 @@ class InfoPrinterAbstract: Class for printing dataframe or series info. """ - def to_buffer(self, buf: IO[str] | None = None) -> None: + def to_buffer(self, buf: WriteBuffer[str] | None = None) -> None: """Save dataframe info into buffer.""" table_builder = self._create_table_builder() lines = table_builder.get_lines() diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 40803ff14e357..b16e6e6366330 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -21,10 +21,11 @@ from pandas._typing import ( Axis, - FilePathOrBuffer, + FilePath, IndexLabel, Level, Scalar, + WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import doc @@ -464,7 +465,7 @@ def to_excel( def to_latex( self, - buf: FilePathOrBuffer[str] | None = None, + buf: FilePath | WriteBuffer[str] | None = None, *, column_format: str | None = None, position: str | None = None, @@ -488,8 +489,10 @@ def to_latex( Parameters ---------- - buf : str, Path, or StringIO-like, optional, default None - Buffer to write to. 
If `None`, the output is returned as a string. + buf : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If None, the result is + returned as a string. column_format : str, optional The LaTeX column specification placed in location: @@ -893,7 +896,7 @@ def to_latex( def to_html( self, - buf: FilePathOrBuffer[str] | None = None, + buf: FilePath | WriteBuffer[str] | None = None, *, table_uuid: str | None = None, table_attributes: str | None = None, @@ -915,8 +918,10 @@ def to_html( Parameters ---------- - buf : str, Path, or StringIO-like, optional, default None - Buffer to write to. If ``None``, the output is returned as a string. + buf : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If None, the result is + returned as a string. 
table_uuid : str, optional Id attribute assigned to the <table> HTML element in the format: diff --git a/pandas/io/formats/xml.py b/pandas/io/formats/xml.py index ea7d1dfa1645e..b997cd9bddd1e 100644 --- a/pandas/io/formats/xml.py +++ b/pandas/io/formats/xml.py @@ -9,8 +9,10 @@ from pandas._typing import ( CompressionOptions, - FilePathOrBuffer, + FilePath, + ReadBuffer, StorageOptions, + WriteBuffer, ) from pandas.errors import AbstractMethodError @@ -90,7 +92,7 @@ class BaseXMLFormatter: def __init__( self, frame: DataFrame, - path_or_buffer: FilePathOrBuffer | None = None, + path_or_buffer: FilePath | WriteBuffer[bytes] | None = None, index: bool | None = True, root_name: str | None = "data", row_name: str | None = "row", @@ -102,7 +104,7 @@ def __init__( encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, - stylesheet: FilePathOrBuffer | None = None, + stylesheet: FilePath | ReadBuffer[str] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> None: @@ -272,7 +274,7 @@ def write_output(self) -> str | None: storage_options=self.storage_options, is_text=False, ) as handles: - handles.handle.write(xml_doc) # type: ignore[arg-type] + handles.handle.write(xml_doc) return None else: @@ -582,7 +584,6 @@ def transform_doc(self) -> bytes: conditionally by its specific object type, then transforms original tree with XSLT script. 
""" - from lxml.etree import ( XSLT, XMLParser, @@ -591,6 +592,7 @@ def transform_doc(self) -> bytes: ) style_doc = self.stylesheet + assert style_doc is not None # is ensured by caller handle_data = get_data_from_filepath( filepath_or_buffer=style_doc, diff --git a/pandas/io/html.py b/pandas/io/html.py index cffe910f1c8ff..7985dcbec9672 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -14,7 +14,10 @@ Sequence, ) -from pandas._typing import FilePathOrBuffer +from pandas._typing import ( + FilePath, + ReadBuffer, +) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, @@ -119,18 +122,21 @@ def _get_skiprows(skiprows: int | Sequence[int] | slice | None): raise TypeError(f"{type(skiprows).__name__} is not a valid type for skipping rows") -def _read(obj: bytes | FilePathOrBuffer, encoding: str | None) -> str | bytes: +def _read( + obj: bytes | FilePath | ReadBuffer[str] | ReadBuffer[bytes], encoding: str | None +) -> str | bytes: """ Try to read from a url, file or string. 
Parameters ---------- - obj : str, unicode, or file-like + obj : str, unicode, path object, or file-like object Returns ------- raw_text : str """ + text: str | bytes if ( is_url(obj) or hasattr(obj, "read") @@ -148,9 +154,7 @@ def _read(obj: bytes | FilePathOrBuffer, encoding: str | None) -> str | bytes: text = obj else: raise TypeError(f"Cannot read object of type '{type(obj).__name__}'") - # error: Incompatible return value type (got "Union[Any, bytes, None, str]", - # expected "Union[str, bytes]") - return text # type: ignore[return-value] + return text class _HtmlFrameParser: @@ -211,7 +215,7 @@ class _HtmlFrameParser: def __init__( self, - io: FilePathOrBuffer, + io: FilePath | ReadBuffer[str] | ReadBuffer[bytes], match: str | Pattern, attrs: dict[str, str] | None, encoding: str, @@ -944,7 +948,7 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs): @deprecate_nonkeyword_arguments(version="2.0") def read_html( - io: FilePathOrBuffer, + io: FilePath | ReadBuffer[str], match: str | Pattern = ".+", flavor: str | None = None, header: int | Sequence[int] | None = None, @@ -965,8 +969,10 @@ def read_html( Parameters ---------- - io : str, path object or file-like object - A URL, a file-like object, or a raw string containing HTML. Note that + io : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``read()`` function. + The string can represent a URL or the HTML itself. Note that lxml only accepts the http, ftp and file url protocols. If you have a URL that starts with ``'https'`` you might try removing the ``'s'``. 
diff --git a/pandas/io/orc.py b/pandas/io/orc.py index 6bdb4df806b5c..6dd4de597c29d 100644 --- a/pandas/io/orc.py +++ b/pandas/io/orc.py @@ -3,7 +3,10 @@ from typing import TYPE_CHECKING -from pandas._typing import FilePathOrBuffer +from pandas._typing import ( + FilePath, + ReadBuffer, +) from pandas.compat._optional import import_optional_dependency from pandas.io.common import get_handle @@ -13,7 +16,7 @@ def read_orc( - path: FilePathOrBuffer, columns: list[str] | None = None, **kwargs + path: FilePath | ReadBuffer[bytes], columns: list[str] | None = None, **kwargs ) -> DataFrame: """ Load an ORC object from the file path, returning a DataFrame. @@ -22,18 +25,12 @@ def read_orc( Parameters ---------- - path : str, path object or file-like object - Any valid string path is acceptable. The string could be a URL. Valid - URL schemes include http, ftp, s3, and file. For file URLs, a host is + path : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.orc``. - - If you want to pass in a path object, pandas accepts any - ``os.PathLike``. - - By file-like object, we refer to objects with a ``read()`` method, - such as a file handle (e.g. via builtin ``open`` function) - or ``StringIO``. columns : list, default None If not None, only these columns will be read from the file. 
**kwargs diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 2eb1dd2d44d65..c4b9e36472092 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -3,15 +3,14 @@ import io import os -from typing import ( - Any, - AnyStr, -) +from typing import Any from warnings import catch_warnings from pandas._typing import ( - FilePathOrBuffer, + FilePath, + ReadBuffer, StorageOptions, + WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError @@ -69,12 +68,14 @@ def get_engine(engine: str) -> BaseImpl: def _get_path_or_handle( - path: FilePathOrBuffer, + path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], fs: Any, storage_options: StorageOptions = None, mode: str = "rb", is_dir: bool = False, -) -> tuple[FilePathOrBuffer, IOHandles[bytes] | None, Any]: +) -> tuple[ + FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any +]: """File handling for PyArrow.""" path_or_handle = stringify_path(path) if is_fsspec_url(path_or_handle) and fs is None: @@ -157,7 +158,7 @@ def __init__(self): def write( self, df: DataFrame, - path: FilePathOrBuffer[AnyStr], + path: FilePath | WriteBuffer[bytes], compression: str | None = "snappy", index: bool | None = None, storage_options: StorageOptions = None, @@ -353,7 +354,7 @@ def read( @doc(storage_options=generic._shared_docs["storage_options"]) def to_parquet( df: DataFrame, - path: FilePathOrBuffer | None = None, + path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, @@ -367,13 +368,12 @@ def to_parquet( Parameters ---------- df : DataFrame - path : str or file-like object, default None - If a string, it will be used as Root Directory path - when writing a partitioned dataset. By file-like object, - we refer to objects with a write() method, such as a file handle - (e.g. via builtin open function) or io.BytesIO. 
The engine - fastparquet does not accept file-like objects. If path is None, - a bytes object is returned. + path : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. If None, the result is + returned as bytes. If a string, it will be used as Root Directory path + when writing a partitioned dataset. The engine fastparquet does not + accept file-like objects. .. versionchanged:: 1.2.0 @@ -415,7 +415,7 @@ def to_parquet( partition_cols = [partition_cols] impl = get_engine(engine) - path_or_buf: FilePathOrBuffer = io.BytesIO() if path is None else path + path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path impl.write( df, @@ -449,21 +449,15 @@ def read_parquet( Parameters ---------- path : str, path object or file-like object - Any valid string path is acceptable. The string could be a URL. Valid - URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is - expected. A local file could be: + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. + The string could be a URL. Valid URL schemes include http, ftp, s3, + gs, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.parquet``. A file URL can also be a path to a directory that contains multiple partitioned parquet files. Both pyarrow and fastparquet support paths to directories as well as file URLs. A directory path could be: ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir`` - - If you want to pass in a path object, pandas accepts any - ``os.PathLike``. - - By file-like object, we refer to objects with a ``read()`` method, - such as a file handle (e.g. via builtin ``open`` function) - or ``StringIO``. engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. 
If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py index 5b1b178c4f610..9fbeeb74901ef 100644 --- a/pandas/io/parsers/arrow_parser_wrapper.py +++ b/pandas/io/parsers/arrow_parser_wrapper.py @@ -1,6 +1,9 @@ from __future__ import annotations -from pandas._typing import FilePathOrBuffer +from pandas._typing import ( + FilePath, + ReadBuffer, +) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.inference import is_integer @@ -16,7 +19,7 @@ class ArrowParserWrapper(ParserBase): Wrapper for the pyarrow engine for read_csv() """ - def __init__(self, src: FilePathOrBuffer, **kwds): + def __init__(self, src: FilePath | ReadBuffer[bytes], **kwds): self.kwds = kwds self.src = src diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 42b9c8c9f10fe..d096e9008112b 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -26,7 +26,8 @@ from pandas._typing import ( ArrayLike, DtypeArg, - FilePathOrBuffer, + FilePath, + ReadCsvBuffer, ) from pandas.errors import ( ParserError, @@ -218,7 +219,11 @@ def __init__(self, kwds): # Normally, this arg would get pre-processed earlier on self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR) - def _open_handles(self, src: FilePathOrBuffer, kwds: dict[str, Any]) -> None: + def _open_handles( + self, + src: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + kwds: dict[str, Any], + ) -> None: """ Let the readers open IOHandles after they are done with their potential raises. 
""" diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py index db750cded45e5..e96df3b3f3782 100644 --- a/pandas/io/parsers/c_parser_wrapper.py +++ b/pandas/io/parsers/c_parser_wrapper.py @@ -7,7 +7,8 @@ import pandas._libs.parsers as parsers from pandas._typing import ( ArrayLike, - FilePathOrBuffer, + FilePath, + ReadCsvBuffer, ) from pandas.errors import DtypeWarning from pandas.util._exceptions import find_stack_level @@ -31,7 +32,9 @@ class CParserWrapper(ParserBase): low_memory: bool _reader: parsers.TextReader - def __init__(self, src: FilePathOrBuffer, **kwds): + def __init__( + self, src: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], **kwds + ): self.kwds = kwds kwds = kwds.copy() ParserBase.__init__(self, kwds) diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index 36387f0835f4a..f5420618c0235 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -20,7 +20,8 @@ import pandas._libs.lib as lib from pandas._typing import ( - FilePathOrBuffer, + FilePath, + ReadCsvBuffer, Scalar, ) from pandas.errors import ( @@ -45,7 +46,9 @@ class PythonParser(ParserBase): - def __init__(self, f: FilePathOrBuffer | list, **kwds): + def __init__( + self, f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list, **kwds + ): """ Workhorse function for processing nested list into DataFrame """ diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 0b57f0f5ef814..9f555d77948a7 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -17,7 +17,8 @@ from pandas._typing import ( ArrayLike, DtypeArg, - FilePathOrBuffer, + FilePath, + ReadCsvBuffer, StorageOptions, ) from pandas.errors import ( @@ -505,7 +506,9 @@ def _validate_names(names): raise ValueError("Names should be an ordered collection.") -def _read(filepath_or_buffer: FilePathOrBuffer, kwds): +def _read( + filepath_or_buffer: FilePath | 
ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds +): """Generic reader of line files.""" if kwds.get("date_parser", None) is not None: if isinstance(kwds["parse_dates"], bool): @@ -554,7 +557,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): ) ) def read_csv( - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], sep=lib.no_default, delimiter=None, # Column and Index Locations and Names @@ -652,7 +655,7 @@ def read_csv( ) ) def read_table( - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], sep=lib.no_default, delimiter=None, # Column and Index Locations and Names @@ -739,7 +742,7 @@ def read_table( def read_fwf( - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], colspecs="infer", widths=None, infer_nrows=100, @@ -756,18 +759,12 @@ def read_fwf( Parameters ---------- - filepath_or_buffer : str, path object or file-like object - Any valid string path is acceptable. The string could be a URL. Valid - URL schemes include http, ftp, s3, and file. For file URLs, a host is + filepath_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a text ``read()`` function. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.csv``. - - If you want to pass in a path object, pandas accepts any - ``os.PathLike``. - - By file-like object, we refer to objects with a ``read()`` method, - such as a file handle (e.g. via builtin ``open`` function) - or ``StringIO``. colspecs : list of tuple (int, int) or 'infer'. optional A list of tuples giving the extents of the fixed-width fields of each line as half-open intervals (i.e., [from, to[ ). 
@@ -942,10 +939,10 @@ def _get_options_with_defaults(self, engine): def _check_file_or_buffer(self, f, engine): # see gh-16530 - if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"): - # The C engine doesn't need the file-like to have the "__next__" - # attribute. However, the Python engine explicitly calls - # "__next__(...)" when iterating through such an object, meaning it + if is_file_like(f) and engine != "c" and not hasattr(f, "__iter__"): + # The C engine doesn't need the file-like to have the "__iter__" + # attribute. However, the Python engine needs "__iter__(...)" + # when iterating through such an object, meaning it # needs to have that attribute raise ValueError( "The 'python' engine cannot iterate through this file buffer." diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 6a91c12ee286e..8bd0942550e6e 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -1,12 +1,16 @@ """ pickle compat """ +from __future__ import annotations + import pickle from typing import Any import warnings from pandas._typing import ( CompressionOptions, - FilePathOrBuffer, + FilePath, + ReadPickleBuffer, StorageOptions, + WriteBuffer, ) from pandas.compat import pickle_compat as pc from pandas.util._decorators import doc @@ -19,7 +23,7 @@ @doc(storage_options=generic._shared_docs["storage_options"]) def to_pickle( obj: Any, - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | WriteBuffer[bytes], compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, @@ -31,8 +35,9 @@ def to_pickle( ---------- obj : any object Any python object. - filepath_or_buffer : str, path object or file-like object - File path, URL, or buffer where the pickled object will be stored. + filepath_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. .. 
versionchanged:: 1.0.0 Accept URL. URL has to be of S3 or GCS. @@ -103,26 +108,15 @@ def to_pickle( # pickle create the entire object and then write it to the buffer. # "zip" would also be here if pandas.io.common._BytesZipFile # wouldn't buffer write calls - handles.handle.write( - # error: Argument 1 to "write" of "TextIOBase" has incompatible type - # "bytes"; expected "str" - pickle.dumps(obj, protocol=protocol) # type: ignore[arg-type] - ) + handles.handle.write(pickle.dumps(obj, protocol=protocol)) else: # letting pickle write directly to the buffer is more memory-efficient - pickle.dump( - # error: Argument 2 to "dump" has incompatible type "Union[IO[Any], - # RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper, mmap]"; expected - # "IO[bytes]" - obj, - handles.handle, # type: ignore[arg-type] - protocol=protocol, - ) + pickle.dump(obj, handles.handle, protocol=protocol) @doc(storage_options=generic._shared_docs["storage_options"]) def read_pickle( - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | ReadPickleBuffer, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ): @@ -136,8 +130,9 @@ def read_pickle( Parameters ---------- - filepath_or_buffer : str, path object or file-like object - File path, URL, or buffer where the pickled object will be loaded from. + filepath_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``readlines()`` function. .. versionchanged:: 1.0.0 Accept URL. URL is not limited to S3 and GCS. @@ -211,10 +206,7 @@ def read_pickle( with warnings.catch_warnings(record=True): # We want to silence any warnings about, e.g. moved modules. 
warnings.simplefilter("ignore", Warning) - # error: Argument 1 to "load" has incompatible type "Union[IO[Any], - # RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper, mmap]"; - # expected "IO[bytes]" - return pickle.load(handles.handle) # type: ignore[arg-type] + return pickle.load(handles.handle) except excs_to_catch: # e.g. # "No module named 'pandas.core.sparse.series'" diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 300df9728cd75..cd863cabf5c2d 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -21,14 +21,14 @@ timedelta, ) import struct -from typing import ( - IO, - Any, - cast, -) +from typing import cast import numpy as np +from pandas._typing import ( + FilePath, + ReadBuffer, +) from pandas.errors import ( EmptyDataError, OutOfBoundsDatetime, @@ -159,7 +159,7 @@ class SAS7BDATReader(ReaderBase, abc.Iterator): def __init__( self, - path_or_buf, + path_or_buf: FilePath | ReadBuffer[bytes], index=None, convert_dates=True, blank_missing=True, @@ -179,16 +179,16 @@ def __init__( self.default_encoding = "latin-1" self.compression = b"" - self.column_names_strings = [] - self.column_names = [] - self.column_formats = [] - self.columns = [] + self.column_names_strings: list[str] = [] + self.column_names: list[str] = [] + self.column_formats: list[str] = [] + self.columns: list[_Column] = [] - self._current_page_data_subheader_pointers = [] + self._current_page_data_subheader_pointers: list[_SubheaderPointer] = [] self._cached_page = None - self._column_data_lengths = [] - self._column_data_offsets = [] - self._column_types = [] + self._column_data_lengths: list[int] = [] + self._column_data_offsets: list[int] = [] + self._column_types: list[bytes] = [] self._current_row_in_file_index = 0 self._current_row_on_page_index = 0 @@ -196,7 +196,7 @@ def __init__( self.handles = get_handle(path_or_buf, "rb", is_text=False) - self._path_or_buf = cast(IO[Any], self.handles.handle) + self._path_or_buf = self.handles.handle 
try: self._get_properties() @@ -227,7 +227,7 @@ def _get_properties(self) -> None: # Check magic number self._path_or_buf.seek(0) - self._cached_page = cast(bytes, self._path_or_buf.read(288)) + self._cached_page = self._path_or_buf.read(288) if self._cached_page[0 : len(const.magic)] != const.magic: raise ValueError("magic number mismatch (not a SAS file?)") @@ -301,7 +301,7 @@ def _get_properties(self) -> None: ) # Read the rest of the header into cached_page. - buf = cast(bytes, self._path_or_buf.read(self.header_length - 288)) + buf = self._path_or_buf.read(self.header_length - 288) self._cached_page += buf # error: Argument 1 to "len" has incompatible type "Optional[bytes]"; # expected "Sized" @@ -400,7 +400,7 @@ def _read_bytes(self, offset: int, length: int): def _parse_metadata(self) -> None: done = False while not done: - self._cached_page = cast(bytes, self._path_or_buf.read(self._page_length)) + self._cached_page = self._path_or_buf.read(self._page_length) if len(self._cached_page) <= 0: break if len(self._cached_page) != self._page_length: @@ -761,7 +761,7 @@ def read(self, nrows: int | None = None) -> DataFrame | None: def _read_next_page(self): self._current_page_data_subheader_pointers = [] - self._cached_page = cast(bytes, self._path_or_buf.read(self._page_length)) + self._cached_page = self._path_or_buf.read(self._page_length) if len(self._cached_page) <= 0: return True elif len(self._cached_page) != self._page_length: @@ -817,7 +817,7 @@ def _chunk_to_dataframe(self) -> DataFrame: js += 1 else: self.close() - raise ValueError(f"unknown column type {self._column_types[j]}") + raise ValueError(f"unknown column type {repr(self._column_types[j])}") df = DataFrame(rslt, columns=self.column_names, index=ix, copy=False) return df diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index 3f9bf6662e99f..d8a3412e05d05 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -7,17 +7,19 @@ 
https://support.sas.com/techsup/technote/ts140.pdf """ +from __future__ import annotations + from collections import abc from datetime import datetime import struct -from typing import ( - IO, - cast, -) import warnings import numpy as np +from pandas._typing import ( + FilePath, + ReadBuffer, +) from pandas.util._decorators import Appender import pandas as pd @@ -248,7 +250,11 @@ class XportReader(ReaderBase, abc.Iterator): __doc__ = _xport_reader_doc def __init__( - self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None + self, + filepath_or_buffer: FilePath | ReadBuffer[bytes], + index=None, + encoding: str | None = "ISO-8859-1", + chunksize=None, ): self._encoding = encoding @@ -259,7 +265,7 @@ def __init__( self.handles = get_handle( filepath_or_buffer, "rb", encoding=encoding, is_text=False ) - self.filepath_or_buffer = cast(IO[bytes], self.handles.handle) + self.filepath_or_buffer = self.handles.handle try: self._read_header() diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index b323ce39763a1..f50fc777f55e9 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -13,7 +13,10 @@ overload, ) -from pandas._typing import FilePathOrBuffer +from pandas._typing import ( + FilePath, + ReadBuffer, +) from pandas.io.common import stringify_path @@ -44,7 +47,7 @@ def __exit__(self, exc_type, exc_value, traceback): @overload def read_sas( - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | ReadBuffer[bytes], format: str | None = ..., index: Hashable | None = ..., encoding: str | None = ..., @@ -56,7 +59,7 @@ def read_sas( @overload def read_sas( - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | ReadBuffer[bytes], format: str | None = ..., index: Hashable | None = ..., encoding: str | None = ..., @@ -67,7 +70,7 @@ def read_sas( def read_sas( - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | ReadBuffer[bytes], format: str | None = None, index: 
Hashable | None = None, encoding: str | None = None, @@ -79,18 +82,12 @@ def read_sas( Parameters ---------- - filepath_or_buffer : str, path object or file-like object - Any valid string path is acceptable. The string could be a URL. Valid - URL schemes include http, ftp, s3, and file. For file URLs, a host is + filepath_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.sas``. - - If you want to pass in a path object, pandas accepts any - ``os.PathLike``. - - By file-like object, we refer to objects with a ``read()`` method, - such as a file handle (e.g. via builtin ``open`` function) - or ``StringIO``. format : str {'xport', 'sas7bdat'} or None If None, file format is inferred from file extension. If 'xport' or 'sas7bdat', uses the corresponding format. 
diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 013f17580600d..ff9d8a1be3d1e 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -18,6 +18,7 @@ import struct import sys from typing import ( + IO, TYPE_CHECKING, Any, AnyStr, @@ -33,10 +34,11 @@ from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( - Buffer, CompressionOptions, - FilePathOrBuffer, + FilePath, + ReadBuffer, StorageOptions, + WriteBuffer, ) from pandas.util._decorators import ( Appender, @@ -1117,7 +1119,7 @@ class StataReader(StataParser, abc.Iterator): def __init__( self, - path_or_buf: FilePathOrBuffer, + path_or_buf: FilePath | ReadBuffer[bytes], convert_dates: bool = True, convert_categoricals: bool = True, index_col: str | None = None, @@ -1168,10 +1170,7 @@ def __init__( compression=compression, ) as handles: # Copy to BytesIO, and ensure no encoding - - # Argument 1 to "BytesIO" has incompatible type "Union[Any, bytes, None, - # str]"; expected "bytes" - self.path_or_buf = BytesIO(handles.handle.read()) # type: ignore[arg-type] + self.path_or_buf = BytesIO(handles.handle.read()) self._read_header() self._setup_dtype() @@ -2002,7 +2001,7 @@ def value_labels(self) -> dict[str, dict[float | int, str]]: @Appender(_read_stata_doc) def read_stata( - filepath_or_buffer: FilePathOrBuffer, + filepath_or_buffer: FilePath | ReadBuffer[bytes], convert_dates: bool = True, convert_categoricals: bool = True, index_col: str | None = None, @@ -2270,7 +2269,7 @@ class StataWriter(StataParser): def __init__( self, - fname: FilePathOrBuffer, + fname: FilePath | WriteBuffer[bytes], data: DataFrame, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, @@ -2294,7 +2293,7 @@ def __init__( self._value_labels: list[StataValueLabel] = [] self._has_value_labels = np.array([], dtype=bool) self._compression = compression - self._output_file: Buffer[bytes] | None = None + self._output_file: IO[bytes] | None = None 
self._converted_names: dict[Hashable, str] = {} # attach nobs, nvars, data, varlist, typlist self._prepare_pandas(data) @@ -2310,15 +2309,13 @@ def _write(self, to_write: str) -> None: """ Helper to call encode before writing to file for Python 3 compat. """ - self.handles.handle.write( - to_write.encode(self._encoding) # type: ignore[arg-type] - ) + self.handles.handle.write(to_write.encode(self._encoding)) def _write_bytes(self, value: bytes) -> None: """ Helper to assert file is open before writing. """ - self.handles.handle.write(value) # type: ignore[arg-type] + self.handles.handle.write(value) def _prepare_non_cat_value_labels( self, data: DataFrame @@ -2686,7 +2683,7 @@ def _close(self) -> None: if self._output_file is not None: assert isinstance(self.handles.handle, BytesIO) bio, self.handles.handle = self.handles.handle, self._output_file - self.handles.handle.write(bio.getvalue()) # type: ignore[arg-type] + self.handles.handle.write(bio.getvalue()) def _write_map(self) -> None: """No-op, future compatibility""" @@ -3203,7 +3200,7 @@ class StataWriter117(StataWriter): def __init__( self, - fname: FilePathOrBuffer, + fname: FilePath | WriteBuffer[bytes], data: DataFrame, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, @@ -3605,7 +3602,7 @@ class StataWriterUTF8(StataWriter117): def __init__( self, - fname: FilePathOrBuffer, + fname: FilePath | WriteBuffer[bytes], data: DataFrame, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, diff --git a/pandas/io/xml.py b/pandas/io/xml.py index bc3436861f1a8..3c3b4afa2c57d 100644 --- a/pandas/io/xml.py +++ b/pandas/io/xml.py @@ -7,9 +7,9 @@ import io from pandas._typing import ( - Buffer, CompressionOptions, - FilePathOrBuffer, + FilePath, + ReadBuffer, StorageOptions, ) from pandas.compat._optional import import_optional_dependency @@ -199,9 +199,6 @@ class _EtreeFrameParser(_XMLFrameParser): standard library XML module: `xml.etree.ElementTree`. 
""" - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) - def parse_data(self) -> list[dict[str, str | None]]: from xml.etree.ElementTree import XML @@ -571,11 +568,11 @@ def _transform_doc(self) -> bytes: def get_data_from_filepath( - filepath_or_buffer, + filepath_or_buffer: FilePath | bytes | ReadBuffer[bytes] | ReadBuffer[str], encoding, compression, storage_options, -) -> str | bytes | Buffer: +) -> str | bytes | ReadBuffer[bytes] | ReadBuffer[str]: """ Extract raw XML data. @@ -587,7 +584,8 @@ def get_data_from_filepath( This method turns (1) into (2) to simplify the rest of the processing. It returns input types (2) and (3) unchanged. """ - filepath_or_buffer = stringify_path(filepath_or_buffer) + if not isinstance(filepath_or_buffer, bytes): + filepath_or_buffer = stringify_path(filepath_or_buffer) if ( isinstance(filepath_or_buffer, str) @@ -606,7 +604,10 @@ def get_data_from_filepath( storage_options=storage_options, ) as handle_obj: filepath_or_buffer = ( - handle_obj.handle.read() + # error: Incompatible types in assignment (expression has type + # "Union[str, IO[str]]", variable has type "Union[Union[str, + # PathLike[str]], bytes, ReadBuffer[bytes], ReadBuffer[str]]") + handle_obj.handle.read() # type: ignore[assignment] if hasattr(handle_obj.handle, "read") else handle_obj.handle ) @@ -728,7 +729,7 @@ def _parse( @doc(storage_options=_shared_docs["storage_options"]) def read_xml( - path_or_buffer: FilePathOrBuffer, + path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str], xpath: str | None = "./*", namespaces: dict | list[dict] | None = None, elems_only: bool | None = False, @@ -736,7 +737,7 @@ def read_xml( names: list[str] | None = None, encoding: str | None = "utf-8", parser: str | None = "lxml", - stylesheet: FilePathOrBuffer | None = None, + stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> 
DataFrame: @@ -748,8 +749,10 @@ def read_xml( Parameters ---------- path_or_buffer : str, path object, or file-like object - Any valid XML string or path is acceptable. The string could be a URL. - Valid URL schemes include http, ftp, s3, and file. + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a ``read()`` function. The string can be any valid XML + string or a path. The string can further be a URL. Valid URL schemes + include http, ftp, s3, and file. xpath : str, optional, default './\*' The XPath to parse required set of nodes for migration to DataFrame. diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py index f62c9fd1349bf..df8be721ec871 100644 --- a/pandas/tests/io/parser/test_python_parser_only.py +++ b/pandas/tests/io/parser/test_python_parser_only.py @@ -310,3 +310,22 @@ def test_malformed_skipfooter(python_parser_only): msg = "Expected 3 fields in line 4, saw 5" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), header=1, comment="#", skipfooter=1) + + +def test_python_engine_file_no_next(python_parser_only): + parser = python_parser_only + + class NoNextBuffer: + def __init__(self, csv_data): + self.data = csv_data + + def __iter__(self): + return self.data.__iter__() + + def read(self): + return self.data + + def readline(self): + return self.data + + parser.read_csv(NoNextBuffer("a\n1")) diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index 1e5cf49ce24ae..89d35499fd597 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -104,22 +104,25 @@ def test_python_engine(self, python_engine): with pytest.raises(ValueError, match=msg): read_csv(StringIO(data), engine=python_engine, **kwargs) - def test_python_engine_file_no_next(self, python_engine): + def test_python_engine_file_no_iter(self, python_engine): # see gh-16530 
class NoNextBuffer: def __init__(self, csv_data): self.data = csv_data - def __iter__(self): - return self + def __next__(self): + return self.data.__next__() def read(self): return self.data + def readline(self): + return self.data + data = "a\n1" - msg = "The 'python' engine cannot iterate" + msg = "'NoNextBuffer' object is not iterable|argument 1 must be an iterator" - with pytest.raises(ValueError, match=msg): + with pytest.raises(TypeError, match=msg): read_csv(NoNextBuffer(data), engine=python_engine) def test_pyarrow_engine(self): diff --git a/pandas/tests/io/xml/test_to_xml.py b/pandas/tests/io/xml/test_to_xml.py index c257b61db296e..b8d146c597d2c 100644 --- a/pandas/tests/io/xml/test_to_xml.py +++ b/pandas/tests/io/xml/test_to_xml.py @@ -1287,8 +1287,7 @@ def test_compression_output(parser, comp): output = equalize_decl(output) - # error: Item "None" of "Union[str, bytes, None]" has no attribute "strip" - assert geom_xml == output.strip() # type: ignore[union-attr] + assert geom_xml == output.strip() @pytest.mark.parametrize("comp", ["bz2", "gzip", "xz", "zip"]) @@ -1306,8 +1305,7 @@ def test_filename_and_suffix_comp(parser, comp, compfile): output = equalize_decl(output) - # error: Item "None" of "Union[str, bytes, None]" has no attribute "strip" - assert geom_xml == output.strip() # type: ignore[union-attr] + assert geom_xml == output.strip() def test_unsuported_compression(datapath, parser):
Fixes #41610, rebased on top of #43855. This PR does a few things: 1. break `FilePathOrBuffer` apart to not mix basic types and generics 2. use protocols instead of union of specific classes 3. define many fine-grained protocols for the to/read methods/functions I tested that the protocols are sufficient (need no additional attributes/methods) using mock classes with: - read_csv (python/c/pyarrow; w/wo compression) and to_csv (w/wo compression) - to_json and read_json (each w/wo compression) - to_pickle, read_pickle - to_excel (openpyxl/xlsxwriter) and read_excel (openpyxl) - to_stata and read_stata Future: use many overloads for `get_handle` to return the (wrapped) fine-grained protocols.
https://api.github.com/repos/pandas-dev/pandas/pulls/43951
2021-10-10T02:23:24Z
2021-11-17T02:11:38Z
2021-11-17T02:11:38Z
2021-11-24T23:02:11Z
CLN: Lint for comprehension codes
diff --git a/ci/lint.sh b/ci/lint.sh index ba5334310c34a..c7ea92e6a67e6 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -20,14 +20,14 @@ if [ "$LINT" ]; then # pandas/_libs/src is C code, so no need to search there. echo "Linting *.py" - flake8 pandas --filename=*.py --exclude pandas/_libs/src --ignore=C406,C408,C409,C410,E402,E731,E741,W503 + flake8 pandas --filename=*.py --exclude pandas/_libs/src --ignore=C406,C408,C409,E402,E731,E741,W503 if [ $? -ne "0" ]; then RET=1 fi echo "Linting *.py DONE" echo "Linting setup.py" - flake8 setup.py --ignore=C406,C408,C409,C410,E402,E731,E741,W503 + flake8 setup.py --ignore=E402,E731,E741,W503 if [ $? -ne "0" ]; then RET=1 fi @@ -41,21 +41,21 @@ if [ "$LINT" ]; then echo "Linting asv_bench/benchmarks/*.py DONE" echo "Linting scripts/*.py" - flake8 scripts --filename=*.py --ignore=C406,C408,C409,C410,E402,E731,E741,W503 + flake8 scripts --filename=*.py --ignore=C408,E402,E731,E741,W503 if [ $? -ne "0" ]; then RET=1 fi echo "Linting scripts/*.py DONE" echo "Linting doc scripts" - flake8 doc/make.py doc/source/conf.py --ignore=C406,C408,C409,C410,E402,E731,E741,W503 + flake8 doc/make.py doc/source/conf.py --ignore=E402,E731,E741,W503 if [ $? -ne "0" ]; then RET=1 fi echo "Linting doc scripts DONE" echo "Linting *.pyx" - flake8 pandas --filename=*.pyx --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126,E265,E305,E301,E127,E261,E271,E129,W291,E222,E241,E123,F403,C400,C401,C402,C403,C404,C407,C411 + flake8 pandas --filename=*.pyx --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126,E265,E305,E301,E127,E261,E271,E129,W291,E222,E241,E123,F403,C400,C401,C402,C403,C404,C405,C406,C407,C408,C409,C410,C411 if [ $? -ne "0" ]; then RET=1 fi
xref #22122 Applying the lint comprehension codes that seem to be fully clean in specified paths in `lint.sh`
https://api.github.com/repos/pandas-dev/pandas/pulls/22455
2018-08-22T06:06:51Z
2018-08-22T10:05:14Z
2018-08-22T10:05:14Z
2018-08-22T15:59:24Z
use fused types for reshape
diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index 8d7e314517ed8..9f4e67ca4e256 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -1,15 +1,95 @@ # -*- coding: utf-8 -*- -cimport cython -from cython cimport Py_ssize_t +import cython +from cython import Py_ssize_t -import numpy as np -from numpy cimport (ndarray, - int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, +from numpy cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, float32_t, float64_t) -cdef double NaN = <double> np.NaN -cdef double nan = NaN +ctypedef fused reshape_t: + uint8_t + uint16_t + uint32_t + uint64_t + int8_t + int16_t + int32_t + int64_t + float32_t + float64_t + object -include "reshape_helper.pxi" + +@cython.wraparound(False) +@cython.boundscheck(False) +def unstack(reshape_t[:, :] values, uint8_t[:] mask, + Py_ssize_t stride, Py_ssize_t length, Py_ssize_t width, + reshape_t[:, :] new_values, uint8_t[:, :] new_mask): + """ + transform long sorted_values to wide new_values + + Parameters + ---------- + values : typed ndarray + mask : boolean ndarray + stride : int + length : int + width : int + new_values : typed ndarray + result array + new_mask : boolean ndarray + result mask + """ + cdef: + Py_ssize_t i, j, w, nulls, s, offset + + if reshape_t is not object: + # evaluated at compile-time + with nogil: + for i in range(stride): + + nulls = 0 + for j in range(length): + + for w in range(width): + + offset = j * width + w + + if mask[offset]: + s = i * width + w + new_values[j, s] = values[offset - nulls, i] + new_mask[j, s] = 1 + else: + nulls += 1 + + else: + # object-dtype, identical to above but we cannot use nogil + for i in range(stride): + + nulls = 0 + for j in range(length): + + for w in range(width): + + offset = j * width + w + + if mask[offset]: + s = i * width + w + new_values[j, s] = values[offset - nulls, i] + new_mask[j, s] = 1 + else: + nulls += 1 + + +unstack_uint8 = unstack["uint8_t"] 
+unstack_uint16 = unstack["uint16_t"] +unstack_uint32 = unstack["uint32_t"] +unstack_uint64 = unstack["uint64_t"] +unstack_int8 = unstack["int8_t"] +unstack_int16 = unstack["int16_t"] +unstack_int32 = unstack["int32_t"] +unstack_int64 = unstack["int64_t"] +unstack_float32 = unstack["float32_t"] +unstack_float64 = unstack["float64_t"] +unstack_object = unstack["object"] diff --git a/pandas/_libs/reshape_helper.pxi.in b/pandas/_libs/reshape_helper.pxi.in deleted file mode 100644 index bb9a5977f8b45..0000000000000 --- a/pandas/_libs/reshape_helper.pxi.in +++ /dev/null @@ -1,81 +0,0 @@ -""" -Template for each `dtype` helper function for take - -WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in -""" - -# ---------------------------------------------------------------------- -# reshape -# ---------------------------------------------------------------------- - -{{py: - -# name, c_type -dtypes = [('uint8', 'uint8_t'), - ('uint16', 'uint16_t'), - ('uint32', 'uint32_t'), - ('uint64', 'uint64_t'), - ('int8', 'int8_t'), - ('int16', 'int16_t'), - ('int32', 'int32_t'), - ('int64', 'int64_t'), - ('float32', 'float32_t'), - ('float64', 'float64_t'), - ('object', 'object')] -}} - -{{for dtype, c_type in dtypes}} - - -@cython.wraparound(False) -@cython.boundscheck(False) -def unstack_{{dtype}}(ndarray[{{c_type}}, ndim=2] values, - ndarray[uint8_t, ndim=1] mask, - Py_ssize_t stride, - Py_ssize_t length, - Py_ssize_t width, - ndarray[{{c_type}}, ndim=2] new_values, - ndarray[uint8_t, ndim=2] new_mask): - """ - transform long sorted_values to wide new_values - - Parameters - ---------- - values : typed ndarray - mask : boolean ndarray - stride : int - length : int - width : int - new_values : typed ndarray - result array - new_mask : boolean ndarray - result mask - - """ - - cdef: - Py_ssize_t i, j, w, nulls, s, offset - - {{if dtype == 'object'}} - if True: - {{else}} - with nogil: - {{endif}} - - for i in range(stride): - - nulls = 0 - for j in range(length): - 
- for w in range(width): - - offset = j * width + w - - if mask[offset]: - s = i * width + w - new_values[j, s] = values[offset - nulls, i] - new_mask[j, s] = 1 - else: - nulls += 1 - -{{endfor}} diff --git a/setup.py b/setup.py index 964167737c9c6..8cc1c0c0651e7 100755 --- a/setup.py +++ b/setup.py @@ -77,7 +77,6 @@ def is_platform_windows(): '_libs/algos_rank_helper.pxi.in'], 'groupby': ['_libs/groupby_helper.pxi.in'], 'join': ['_libs/join_helper.pxi.in', '_libs/join_func_helper.pxi.in'], - 'reshape': ['_libs/reshape_helper.pxi.in'], 'hashtable': ['_libs/hashtable_class_helper.pxi.in', '_libs/hashtable_func_helper.pxi.in'], 'index': ['_libs/index_class_helper.pxi.in'], @@ -559,7 +558,7 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): 'include': []}, '_libs.reshape': { 'pyxfile': '_libs/reshape', - 'depends': _pxi_dep['reshape']}, + 'depends': []}, '_libs.skiplist': { 'pyxfile': '_libs/skiplist', 'depends': ['pandas/_libs/src/skiplist.h']},
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/22454
2018-08-21T23:40:56Z
2018-09-18T12:43:14Z
2018-09-18T12:43:14Z
2018-09-18T13:51:39Z
use fused types for parts of algos_common_helper
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 415e7026e09c8..d2914dc8ac751 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -353,6 +353,523 @@ def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1): return result +# ---------------------------------------------------------------------- + +ctypedef fused algos_t: + float64_t + float32_t + object + int32_t + int64_t + uint64_t + uint8_t + + +# TODO: unused; needed? +@cython.wraparound(False) +@cython.boundscheck(False) +cpdef map_indices(ndarray[algos_t] index): + """ + Produce a dict mapping the values of the input array to their respective + locations. + + Example: + array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1} + + Better to do this with Cython because of the enormous speed boost. + """ + cdef: + Py_ssize_t i, length + dict result = {} + + length = len(index) + + for i in range(length): + result[index[i]] = i + + return result + + +@cython.boundscheck(False) +@cython.wraparound(False) +def pad(ndarray[algos_t] old, ndarray[algos_t] new, limit=None): + cdef: + Py_ssize_t i, j, nleft, nright + ndarray[int64_t, ndim=1] indexer + algos_t cur, next + int lim, fill_count = 0 + + nleft = len(old) + nright = len(new) + indexer = np.empty(nright, dtype=np.int64) + indexer.fill(-1) + + if limit is None: + lim = nright + else: + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') + lim = limit + + if nleft == 0 or nright == 0 or new[nright - 1] < old[0]: + return indexer + + i = j = 0 + + cur = old[0] + + while j <= nright - 1 and new[j] < cur: + j += 1 + + while True: + if j == nright: + break + + if i == nleft - 1: + while j < nright: + if new[j] == cur: + indexer[j] = i + elif new[j] > cur and fill_count < lim: + indexer[j] = i + fill_count += 1 + j += 1 + break + + next = old[i + 1] + + while j < nright and cur <= new[j] < next: + if new[j] == cur: + indexer[j] 
= i + elif fill_count < lim: + indexer[j] = i + fill_count += 1 + j += 1 + + fill_count = 0 + i += 1 + cur = next + + return indexer + + +pad_float64 = pad["float64_t"] +pad_float32 = pad["float32_t"] +pad_object = pad["object"] +pad_int64 = pad["int64_t"] +pad_int32 = pad["int32_t"] +pad_uint64 = pad["uint64_t"] +pad_bool = pad["uint8_t"] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def pad_inplace(ndarray[algos_t] values, + ndarray[uint8_t, cast=True] mask, + limit=None): + cdef: + Py_ssize_t i, N + algos_t val + int lim, fill_count = 0 + + N = len(values) + + # GH#2778 + if N == 0: + return + + if limit is None: + lim = N + else: + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') + lim = limit + + val = values[0] + for i in range(N): + if mask[i]: + if fill_count >= lim: + continue + fill_count += 1 + values[i] = val + else: + fill_count = 0 + val = values[i] + + +pad_inplace_float64 = pad_inplace["float64_t"] +pad_inplace_float32 = pad_inplace["float32_t"] +pad_inplace_object = pad_inplace["object"] +pad_inplace_int64 = pad_inplace["int64_t"] +pad_inplace_int32 = pad_inplace["int32_t"] +pad_inplace_uint64 = pad_inplace["uint64_t"] +pad_inplace_bool = pad_inplace["uint8_t"] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def pad_2d_inplace(ndarray[algos_t, ndim=2] values, + ndarray[uint8_t, ndim=2] mask, + limit=None): + cdef: + Py_ssize_t i, j, N, K + algos_t val + int lim, fill_count = 0 + + K, N = (<object> values).shape + + # GH#2778 + if N == 0: + return + + if limit is None: + lim = N + else: + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') + lim = limit + + for j in range(K): + fill_count = 0 + val = values[j, 0] + for i in range(N): + if mask[j, i]: + if fill_count >= lim: + continue + fill_count += 1 + values[j, i] = val + 
else: + fill_count = 0 + val = values[j, i] + + +pad_2d_inplace_float64 = pad_2d_inplace["float64_t"] +pad_2d_inplace_float32 = pad_2d_inplace["float32_t"] +pad_2d_inplace_object = pad_2d_inplace["object"] +pad_2d_inplace_int64 = pad_2d_inplace["int64_t"] +pad_2d_inplace_int32 = pad_2d_inplace["int32_t"] +pad_2d_inplace_uint64 = pad_2d_inplace["uint64_t"] +pad_2d_inplace_bool = pad_2d_inplace["uint8_t"] + + +""" +Backfilling logic for generating fill vector + +Diagram of what's going on + +Old New Fill vector Mask + . 0 1 + . 0 1 + . 0 1 +A A 0 1 + . 1 1 + . 1 1 + . 1 1 + . 1 1 + . 1 1 +B B 1 1 + . 2 1 + . 2 1 + . 2 1 +C C 2 1 + . 0 + . 0 +D +""" + + +@cython.boundscheck(False) +@cython.wraparound(False) +def backfill(ndarray[algos_t] old, ndarray[algos_t] new, limit=None): + cdef: + Py_ssize_t i, j, nleft, nright + ndarray[int64_t, ndim=1] indexer + algos_t cur, prev + int lim, fill_count = 0 + + nleft = len(old) + nright = len(new) + indexer = np.empty(nright, dtype=np.int64) + indexer.fill(-1) + + if limit is None: + lim = nright + else: + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') + lim = limit + + if nleft == 0 or nright == 0 or new[0] > old[nleft - 1]: + return indexer + + i = nleft - 1 + j = nright - 1 + + cur = old[nleft - 1] + + while j >= 0 and new[j] > cur: + j -= 1 + + while True: + if j < 0: + break + + if i == 0: + while j >= 0: + if new[j] == cur: + indexer[j] = i + elif new[j] < cur and fill_count < lim: + indexer[j] = i + fill_count += 1 + j -= 1 + break + + prev = old[i - 1] + + while j >= 0 and prev < new[j] <= cur: + if new[j] == cur: + indexer[j] = i + elif new[j] < cur and fill_count < lim: + indexer[j] = i + fill_count += 1 + j -= 1 + + fill_count = 0 + i -= 1 + cur = prev + + return indexer + + +backfill_float64 = backfill["float64_t"] +backfill_float32 = backfill["float32_t"] +backfill_object = backfill["object"] +backfill_int64 = 
backfill["int64_t"] +backfill_int32 = backfill["int32_t"] +backfill_uint64 = backfill["uint64_t"] +backfill_bool = backfill["uint8_t"] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def backfill_inplace(ndarray[algos_t] values, + ndarray[uint8_t, cast=True] mask, + limit=None): + cdef: + Py_ssize_t i, N + algos_t val + int lim, fill_count = 0 + + N = len(values) + + # GH#2778 + if N == 0: + return + + if limit is None: + lim = N + else: + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') + lim = limit + + val = values[N - 1] + for i in range(N - 1, -1, -1): + if mask[i]: + if fill_count >= lim: + continue + fill_count += 1 + values[i] = val + else: + fill_count = 0 + val = values[i] + + +backfill_inplace_float64 = backfill_inplace["float64_t"] +backfill_inplace_float32 = backfill_inplace["float32_t"] +backfill_inplace_object = backfill_inplace["object"] +backfill_inplace_int64 = backfill_inplace["int64_t"] +backfill_inplace_int32 = backfill_inplace["int32_t"] +backfill_inplace_uint64 = backfill_inplace["uint64_t"] +backfill_inplace_bool = backfill_inplace["uint8_t"] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def backfill_2d_inplace(ndarray[algos_t, ndim=2] values, + ndarray[uint8_t, ndim=2] mask, + limit=None): + cdef: + Py_ssize_t i, j, N, K + algos_t val + int lim, fill_count = 0 + + K, N = (<object> values).shape + + # GH#2778 + if N == 0: + return + + if limit is None: + lim = N + else: + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') + lim = limit + + for j in range(K): + fill_count = 0 + val = values[j, N - 1] + for i in range(N - 1, -1, -1): + if mask[j, i]: + if fill_count >= lim: + continue + fill_count += 1 + values[j, i] = val + else: + fill_count = 0 + val = values[j, i] + + +backfill_2d_inplace_float64 = 
backfill_2d_inplace["float64_t"] +backfill_2d_inplace_float32 = backfill_2d_inplace["float32_t"] +backfill_2d_inplace_object = backfill_2d_inplace["object"] +backfill_2d_inplace_int64 = backfill_2d_inplace["int64_t"] +backfill_2d_inplace_int32 = backfill_2d_inplace["int32_t"] +backfill_2d_inplace_uint64 = backfill_2d_inplace["uint64_t"] +backfill_2d_inplace_bool = backfill_2d_inplace["uint8_t"] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def arrmap(ndarray[algos_t] index, object func): + cdef: + Py_ssize_t length = index.shape[0] + Py_ssize_t i = 0 + ndarray[object] result = np.empty(length, dtype=np.object_) + + from pandas._libs.lib import maybe_convert_objects + + for i in range(length): + result[i] = func(index[i]) + + return maybe_convert_objects(result) + + +arrmap_float64 = arrmap["float64_t"] +arrmap_float32 = arrmap["float32_t"] +arrmap_object = arrmap["object"] +arrmap_int64 = arrmap["int64_t"] +arrmap_int32 = arrmap["int32_t"] +arrmap_uint64 = arrmap["uint64_t"] +arrmap_bool = arrmap["uint8_t"] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def is_monotonic(ndarray[algos_t] arr, bint timelike): + """ + Returns + ------- + is_monotonic_inc, is_monotonic_dec, is_unique + """ + cdef: + Py_ssize_t i, n + algos_t prev, cur + bint is_monotonic_inc = 1 + bint is_monotonic_dec = 1 + bint is_unique = 1 + bint is_strict_monotonic = 1 + + n = len(arr) + + if n == 1: + if arr[0] != arr[0] or (timelike and <int64_t>arr[0] == iNaT): + # single value is NaN + return False, False, True + else: + return True, True, True + elif n < 2: + return True, True, True + + if timelike and <int64_t>arr[0] == iNaT: + return False, False, True + + if algos_t is not object: + with nogil: + prev = arr[0] + for i in range(1, n): + cur = arr[i] + if timelike and <int64_t>cur == iNaT: + is_monotonic_inc = 0 + is_monotonic_dec = 0 + break + if cur < prev: + is_monotonic_inc = 0 + elif cur > prev: + is_monotonic_dec = 0 + elif cur == prev: + is_unique = 0 + 
else: + # cur or prev is NaN + is_monotonic_inc = 0 + is_monotonic_dec = 0 + break + if not is_monotonic_inc and not is_monotonic_dec: + is_monotonic_inc = 0 + is_monotonic_dec = 0 + break + prev = cur + else: + # object-dtype, identical to above except we cannot use `with nogil` + prev = arr[0] + for i in range(1, n): + cur = arr[i] + if timelike and <int64_t>cur == iNaT: + is_monotonic_inc = 0 + is_monotonic_dec = 0 + break + if cur < prev: + is_monotonic_inc = 0 + elif cur > prev: + is_monotonic_dec = 0 + elif cur == prev: + is_unique = 0 + else: + # cur or prev is NaN + is_monotonic_inc = 0 + is_monotonic_dec = 0 + break + if not is_monotonic_inc and not is_monotonic_dec: + is_monotonic_inc = 0 + is_monotonic_dec = 0 + break + prev = cur + + is_strict_monotonic = is_unique and (is_monotonic_inc or is_monotonic_dec) + return is_monotonic_inc, is_monotonic_dec, is_strict_monotonic + + +is_monotonic_float64 = is_monotonic["float64_t"] +is_monotonic_float32 = is_monotonic["float32_t"] +is_monotonic_object = is_monotonic["object"] +is_monotonic_int64 = is_monotonic["int64_t"] +is_monotonic_int32 = is_monotonic["int32_t"] +is_monotonic_uint64 = is_monotonic["uint64_t"] +is_monotonic_bool = is_monotonic["uint8_t"] + + # generated from template include "algos_common_helper.pxi" include "algos_rank_helper.pxi" diff --git a/pandas/_libs/algos_common_helper.pxi.in b/pandas/_libs/algos_common_helper.pxi.in index ed4c0e4c59609..40b1b1a282670 100644 --- a/pandas/_libs/algos_common_helper.pxi.in +++ b/pandas/_libs/algos_common_helper.pxi.in @@ -15,443 +15,6 @@ Template for each `dtype` helper function using 1-d template WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in """ -#---------------------------------------------------------------------- -# 1-d template -#---------------------------------------------------------------------- - -{{py: - -# name, c_type, dtype, can_hold_na, nogil -dtypes = [('float64', 'float64_t', 'np.float64', True, True), - 
('float32', 'float32_t', 'np.float32', True, True), - ('object', 'object', 'object', True, False), - ('int32', 'int32_t', 'np.int32', False, True), - ('int64', 'int64_t', 'np.int64', False, True), - ('uint64', 'uint64_t', 'np.uint64', False, True), - ('bool', 'uint8_t', 'np.bool', False, True)] - -def get_dispatch(dtypes): - - for name, c_type, dtype, can_hold_na, nogil in dtypes: - - nogil_str = 'with nogil:' if nogil else '' - tab = ' ' if nogil else '' - yield name, c_type, dtype, can_hold_na, nogil_str, tab -}} - -{{for name, c_type, dtype, can_hold_na, nogil_str, tab - in get_dispatch(dtypes)}} - - -@cython.wraparound(False) -@cython.boundscheck(False) -def map_indices_{{name}}(ndarray[{{c_type}}] index): - """ - Produce a dict mapping the values of the input array to their respective - locations. - - Example: - array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1} - - Better to do this with Cython because of the enormous speed boost. - """ - cdef: - Py_ssize_t i, length - dict result = {} - - length = len(index) - - for i in range(length): - result[index[i]] = i - - return result - - -@cython.boundscheck(False) -@cython.wraparound(False) -def pad_{{name}}(ndarray[{{c_type}}] old, ndarray[{{c_type}}] new, limit=None): - cdef: - Py_ssize_t i, j, nleft, nright - ndarray[int64_t, ndim=1] indexer - {{c_type}} cur, next - int lim, fill_count = 0 - - nleft = len(old) - nright = len(new) - indexer = np.empty(nright, dtype=np.int64) - indexer.fill(-1) - - if limit is None: - lim = nright - else: - if not util.is_integer_object(limit): - raise ValueError('Limit must be an integer') - if limit < 1: - raise ValueError('Limit must be greater than 0') - lim = limit - - if nleft == 0 or nright == 0 or new[nright - 1] < old[0]: - return indexer - - i = j = 0 - - cur = old[0] - - while j <= nright - 1 and new[j] < cur: - j += 1 - - while True: - if j == nright: - break - - if i == nleft - 1: - while j < nright: - if new[j] == cur: - indexer[j] = i - elif new[j] > cur and 
fill_count < lim: - indexer[j] = i - fill_count += 1 - j += 1 - break - - next = old[i + 1] - - while j < nright and cur <= new[j] < next: - if new[j] == cur: - indexer[j] = i - elif fill_count < lim: - indexer[j] = i - fill_count += 1 - j += 1 - - fill_count = 0 - i += 1 - cur = next - - return indexer - - -@cython.boundscheck(False) -@cython.wraparound(False) -def pad_inplace_{{name}}(ndarray[{{c_type}}] values, - ndarray[uint8_t, cast=True] mask, - limit=None): - cdef: - Py_ssize_t i, N - {{c_type}} val - int lim, fill_count = 0 - - N = len(values) - - # GH 2778 - if N == 0: - return - - if limit is None: - lim = N - else: - if not util.is_integer_object(limit): - raise ValueError('Limit must be an integer') - if limit < 1: - raise ValueError('Limit must be greater than 0') - lim = limit - - val = values[0] - for i in range(N): - if mask[i]: - if fill_count >= lim: - continue - fill_count += 1 - values[i] = val - else: - fill_count = 0 - val = values[i] - - -@cython.boundscheck(False) -@cython.wraparound(False) -def pad_2d_inplace_{{name}}(ndarray[{{c_type}}, ndim=2] values, - ndarray[uint8_t, ndim=2] mask, - limit=None): - cdef: - Py_ssize_t i, j, N, K - {{c_type}} val - int lim, fill_count = 0 - - K, N = (<object> values).shape - - # GH 2778 - if N == 0: - return - - if limit is None: - lim = N - else: - if not util.is_integer_object(limit): - raise ValueError('Limit must be an integer') - if limit < 1: - raise ValueError('Limit must be greater than 0') - lim = limit - - for j in range(K): - fill_count = 0 - val = values[j, 0] - for i in range(N): - if mask[j, i]: - if fill_count >= lim: - continue - fill_count += 1 - values[j, i] = val - else: - fill_count = 0 - val = values[j, i] - -""" -Backfilling logic for generating fill vector - -Diagram of what's going on - -Old New Fill vector Mask - . 0 1 - . 0 1 - . 0 1 -A A 0 1 - . 1 1 - . 1 1 - . 1 1 - . 1 1 - . 1 1 -B B 1 1 - . 2 1 - . 2 1 - . 2 1 -C C 2 1 - . 0 - . 
0 -D -""" - - -@cython.boundscheck(False) -@cython.wraparound(False) -def backfill_{{name}}(ndarray[{{c_type}}] old, ndarray[{{c_type}}] new, - limit=None): - cdef: - Py_ssize_t i, j, nleft, nright - ndarray[int64_t, ndim=1] indexer - {{c_type}} cur, prev - int lim, fill_count = 0 - - nleft = len(old) - nright = len(new) - indexer = np.empty(nright, dtype=np.int64) - indexer.fill(-1) - - if limit is None: - lim = nright - else: - if not util.is_integer_object(limit): - raise ValueError('Limit must be an integer') - if limit < 1: - raise ValueError('Limit must be greater than 0') - lim = limit - - if nleft == 0 or nright == 0 or new[0] > old[nleft - 1]: - return indexer - - i = nleft - 1 - j = nright - 1 - - cur = old[nleft - 1] - - while j >= 0 and new[j] > cur: - j -= 1 - - while True: - if j < 0: - break - - if i == 0: - while j >= 0: - if new[j] == cur: - indexer[j] = i - elif new[j] < cur and fill_count < lim: - indexer[j] = i - fill_count += 1 - j -= 1 - break - - prev = old[i - 1] - - while j >= 0 and prev < new[j] <= cur: - if new[j] == cur: - indexer[j] = i - elif new[j] < cur and fill_count < lim: - indexer[j] = i - fill_count += 1 - j -= 1 - - fill_count = 0 - i -= 1 - cur = prev - - return indexer - - -@cython.boundscheck(False) -@cython.wraparound(False) -def backfill_inplace_{{name}}(ndarray[{{c_type}}] values, - ndarray[uint8_t, cast=True] mask, - limit=None): - cdef: - Py_ssize_t i, N - {{c_type}} val - int lim, fill_count = 0 - - N = len(values) - - # GH 2778 - if N == 0: - return - - if limit is None: - lim = N - else: - if not util.is_integer_object(limit): - raise ValueError('Limit must be an integer') - if limit < 1: - raise ValueError('Limit must be greater than 0') - lim = limit - - val = values[N - 1] - for i in range(N - 1, -1, -1): - if mask[i]: - if fill_count >= lim: - continue - fill_count += 1 - values[i] = val - else: - fill_count = 0 - val = values[i] - - -@cython.boundscheck(False) -@cython.wraparound(False) -def 
backfill_2d_inplace_{{name}}(ndarray[{{c_type}}, ndim=2] values, - ndarray[uint8_t, ndim=2] mask, - limit=None): - cdef: - Py_ssize_t i, j, N, K - {{c_type}} val - int lim, fill_count = 0 - - K, N = (<object> values).shape - - # GH 2778 - if N == 0: - return - - if limit is None: - lim = N - else: - if not util.is_integer_object(limit): - raise ValueError('Limit must be an integer') - if limit < 1: - raise ValueError('Limit must be greater than 0') - lim = limit - - for j in range(K): - fill_count = 0 - val = values[j, N - 1] - for i in range(N - 1, -1, -1): - if mask[j, i]: - if fill_count >= lim: - continue - fill_count += 1 - values[j, i] = val - else: - fill_count = 0 - val = values[j, i] - - -@cython.boundscheck(False) -@cython.wraparound(False) -def is_monotonic_{{name}}(ndarray[{{c_type}}] arr, bint timelike): - """ - Returns - ------- - is_monotonic_inc, is_monotonic_dec, is_unique - """ - cdef: - Py_ssize_t i, n - {{c_type}} prev, cur - bint is_monotonic_inc = 1 - bint is_monotonic_dec = 1 - bint is_unique = 1 - - n = len(arr) - - if n == 1: - if arr[0] != arr[0] or (timelike and <int64_t>arr[0] == iNaT): - # single value is NaN - return False, False, True - else: - return True, True, True - elif n < 2: - return True, True, True - - if timelike and <int64_t>arr[0] == iNaT: - return False, False, True - - {{nogil_str}} - {{tab}}prev = arr[0] - {{tab}}for i in range(1, n): - {{tab}} cur = arr[i] - {{tab}} if timelike and <int64_t>cur == iNaT: - {{tab}} is_monotonic_inc = 0 - {{tab}} is_monotonic_dec = 0 - {{tab}} break - {{tab}} if cur < prev: - {{tab}} is_monotonic_inc = 0 - {{tab}} elif cur > prev: - {{tab}} is_monotonic_dec = 0 - {{tab}} elif cur == prev: - {{tab}} is_unique = 0 - {{tab}} else: - {{tab}} # cur or prev is NaN - {{tab}} is_monotonic_inc = 0 - {{tab}} is_monotonic_dec = 0 - {{tab}} break - {{tab}} if not is_monotonic_inc and not is_monotonic_dec: - {{tab}} is_monotonic_inc = 0 - {{tab}} is_monotonic_dec = 0 - {{tab}} break - {{tab}} prev = 
cur - return is_monotonic_inc, is_monotonic_dec, \ - is_unique and (is_monotonic_inc or is_monotonic_dec) - - -@cython.wraparound(False) -@cython.boundscheck(False) -def arrmap_{{name}}(ndarray[{{c_type}}] index, object func): - cdef: - Py_ssize_t length = index.shape[0] - Py_ssize_t i = 0 - ndarray[object] result = np.empty(length, dtype=np.object_) - - from pandas._libs.lib import maybe_convert_objects - - for i in range(length): - result[i] = func(index[i]) - - return maybe_convert_objects(result) - -{{endfor}} - -#---------------------------------------------------------------------- -# put template -#---------------------------------------------------------------------- - {{py: # name, c_type, dest_type, dest_dtype
Broken off of #22432, which was a proof of concept.
https://api.github.com/repos/pandas-dev/pandas/pulls/22452
2018-08-21T22:02:35Z
2018-09-18T12:45:00Z
2018-09-18T12:45:00Z
2019-04-30T06:24:01Z
BUG fix IntegerArray.astype int -> uint
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index d7feb6e547b22..7f19bf9cfb5ea 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -42,7 +42,7 @@ Pandas has gained the ability to hold integer dtypes with missing values. This l Here is an example of the usage. We can construct a ``Series`` with the specified dtype. The dtype string ``Int64`` is a pandas ``ExtensionDtype``. Specifying a list or array using the traditional missing value -marker of ``np.nan`` will infer to integer dtype. The display of the ``Series`` will also use the ``NaN`` to indicate missing values in string outputs. (:issue:`20700`, :issue:`20747`) +marker of ``np.nan`` will infer to integer dtype. The display of the ``Series`` will also use the ``NaN`` to indicate missing values in string outputs. (:issue:`20700`, :issue:`20747`, :issue:`22441`) .. ipython:: python diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 3dffabbe473d3..5f6a96833c4f8 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -409,8 +409,7 @@ def astype(self, dtype, copy=True): # if we are astyping to an existing IntegerDtype we can fastpath if isinstance(dtype, _IntegerDtype): - result = self._data.astype(dtype.numpy_dtype, - casting='same_kind', copy=False) + result = self._data.astype(dtype.numpy_dtype, copy=False) return type(self)(result, mask=self._mask, copy=False) # coerce diff --git a/pandas/tests/extension/integer/test_integer.py b/pandas/tests/extension/integer/test_integer.py index e2248285fd2a0..3af127091d2d8 100644 --- a/pandas/tests/extension/integer/test_integer.py +++ b/pandas/tests/extension/integer/test_integer.py @@ -566,16 +566,17 @@ def test_astype(self, all_data): expected = pd.Series(np.asarray(mixed)) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize('dtype', [Int8Dtype(), 'Int8']) + @pytest.mark.parametrize('dtype', [Int8Dtype(), 'Int8', + UInt32Dtype(), 
'UInt32']) def test_astype_specific_casting(self, dtype): s = pd.Series([1, 2, 3], dtype='Int64') result = s.astype(dtype) - expected = pd.Series([1, 2, 3], dtype='Int8') + expected = pd.Series([1, 2, 3], dtype=dtype) self.assert_series_equal(result, expected) s = pd.Series([1, 2, 3, None], dtype='Int64') result = s.astype(dtype) - expected = pd.Series([1, 2, 3, None], dtype='Int8') + expected = pd.Series([1, 2, 3, None], dtype=dtype) self.assert_series_equal(result, expected) def test_construct_cast_invalid(self, dtype):
Closes https://github.com/pandas-dev/pandas/issues/22440
https://api.github.com/repos/pandas-dev/pandas/pulls/22441
2018-08-21T14:35:47Z
2018-08-22T10:08:24Z
2018-08-22T10:08:24Z
2018-08-22T10:19:18Z
BUG #19860 Corrected use of mixed indexes with .at
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 3e22084d98234..e70f3f0f6164b 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -663,6 +663,7 @@ Indexing - Fixed ``DataFrame[np.nan]`` when columns are non-unique (:issue:`21428`) - Bug when indexing :class:`DatetimeIndex` with nanosecond resolution dates and timezones (:issue:`11679`) - Bug where indexing with a Numpy array containing negative values would mutate the indexer (:issue:`21867`) +- Bug where mixed indexes wouldn't allow integers for ``.at`` (:issue:`19860`) - ``Float64Index.get_loc`` now raises ``KeyError`` when boolean key passed. (:issue:`19087`) Missing diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2a0f164887543..7b7fb968b3050 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3125,8 +3125,8 @@ def get_value(self, series, key): iloc = self.get_loc(key) return s[iloc] except KeyError: - if (len(self) > 0 and - self.inferred_type in ['integer', 'boolean']): + if (len(self) > 0 + and (self.holds_integer() or self.is_boolean())): raise elif is_integer(key): return s[key] @@ -3139,7 +3139,7 @@ def get_value(self, series, key): return self._engine.get_value(s, k, tz=getattr(series.dtype, 'tz', None)) except KeyError as e1: - if len(self) > 0 and self.inferred_type in ['integer', 'boolean']: + if len(self) > 0 and (self.holds_integer() or self.is_boolean()): raise try: diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 80b3d579d5447..a245ecfa007f3 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2354,7 +2354,7 @@ def _convert_key(self, key, is_setter=False): raise ValueError("At based indexing on an integer index " "can only have integer indexers") else: - if is_integer(i): + if is_integer(i) and not ax.holds_integer(): raise ValueError("At based indexing on an non-integer " "index can only have non-integer " "indexers") diff --git 
a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 9c992770fc64c..f64c50699461f 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -610,6 +610,22 @@ def test_index_contains(self, index, val): def test_index_not_contains(self, index, val): assert val not in index + @pytest.mark.parametrize("index,val", [ + (Index([0, 1, '2']), 0), + (Index([0, 1, '2']), '2'), + ]) + def test_mixed_index_contains(self, index, val): + # GH 19860 + assert val in index + + @pytest.mark.parametrize("index,val", [ + (Index([0, 1, '2']), '1'), + (Index([0, 1, '2']), 2), + ]) + def test_mixed_index_not_contains(self, index, val): + # GH 19860 + assert val not in index + def test_index_type_coercion(self): with catch_warnings(record=True): @@ -710,6 +726,22 @@ def test_float_index_at_iat(self): for i in range(len(s)): assert s.iat[i] == i + 1 + def test_mixed_index_assignment(self): + # GH 19860 + s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 1, 2]) + s.at['a'] = 11 + assert s.iat[0] == 11 + s.at[1] = 22 + assert s.iat[3] == 22 + + def test_mixed_index_no_fallback(self): + # GH 19860 + s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 1, 2]) + with pytest.raises(KeyError): + s.at[0] + with pytest.raises(KeyError): + s.at[4] + def test_rhs_alignment(self): # GH8258, tests that both rows & columns are aligned to what is # assigned to. 
covers both uniform data-type & multi-type cases diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py index 7314ff6619049..91f006e23e878 100644 --- a/pandas/tests/indexing/test_scalar.py +++ b/pandas/tests/indexing/test_scalar.py @@ -170,3 +170,33 @@ def test_at_with_tz(self): result = df.at[0, 'date'] assert result == expected + + def test_mixed_index_at_iat_loc_iloc_series(self): + # GH 19860 + s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 1, 2]) + for el, item in s.iteritems(): + assert s.at[el] == s.loc[el] == item + for i in range(len(s)): + assert s.iat[i] == s.iloc[i] == i + 1 + + with pytest.raises(KeyError): + s.at[4] + with pytest.raises(KeyError): + s.loc[4] + + def test_mixed_index_at_iat_loc_iloc_dataframe(self): + # GH 19860 + df = DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], + columns=['a', 'b', 'c', 1, 2]) + for rowIdx, row in df.iterrows(): + for el, item in row.iteritems(): + assert df.at[rowIdx, el] == df.loc[rowIdx, el] == item + + for row in range(2): + for i in range(5): + assert df.iat[row, i] == df.iloc[row, i] == row * 5 + i + + with pytest.raises(KeyError): + df.at[0, 3] + with pytest.raises(KeyError): + df.loc[0, 3]
`.at` incorrectly disallowed the use of integer indexes when a mixed index was used ``` s = Series([1,2,3,4,5],index=['a','b','c',1,2]) s.at['a'] # returns 1 s.at[1] # returns 4 s.at[4] # raises KeyError, doesn't do fallback indexing ``` Made sure fallback indexing doesn't happen on mixed indexes - [X] closes #19860 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/22436
2018-08-21T06:54:14Z
2018-08-29T12:49:03Z
2018-08-29T12:49:03Z
2018-09-10T05:48:38Z
Pin blosc to 1.14.3 for travis-27 build
diff --git a/ci/travis-27.yaml b/ci/travis-27.yaml index 3e94f334174e6..9c0347de9adfb 100644 --- a/ci/travis-27.yaml +++ b/ci/travis-27.yaml @@ -29,6 +29,7 @@ dependencies: - PyCrypto - pymysql=0.6.3 - pytables + - blosc=1.14.3 - python-blosc - python-dateutil=2.5.0 - python=2.7* diff --git a/pandas/tests/extension/integer/test_integer.py b/pandas/tests/extension/integer/test_integer.py index f1c833a68c66c..e2248285fd2a0 100644 --- a/pandas/tests/extension/integer/test_integer.py +++ b/pandas/tests/extension/integer/test_integer.py @@ -770,7 +770,7 @@ def test_groupby_mean_included(): df = pd.DataFrame({ "A": ['a', 'b', 'b'], "B": [1, None, 3], - "C": IntegerArray([1, None, 3], dtype='Int64'), + "C": integer_array([1, None, 3], dtype='Int64'), }) result = df.groupby("A").sum() @@ -784,7 +784,7 @@ def test_groupby_mean_included(): def test_astype_nansafe(): # https://github.com/pandas-dev/pandas/pull/22343 - arr = IntegerArray([np.nan, 1, 2], dtype="Int8") + arr = integer_array([np.nan, 1, 2], dtype="Int8") with tm.assert_raises_regex( ValueError, 'cannot convert float NaN to integer'): diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index d096daaa0b664..3fe1c84174acb 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -12,7 +12,7 @@ from pandas import (DataFrame, Series, Timestamp, date_range, compat, option_context, Categorical) -from pandas.core.arrays import IntegerArray, IntervalArray +from pandas.core.arrays import IntervalArray, integer_array from pandas.compat import StringIO import pandas as pd @@ -440,9 +440,9 @@ def test_get_numeric_data(self): def test_get_numeric_data_extension_dtype(self): # GH 22290 df = DataFrame({ - 'A': IntegerArray([-10, np.nan, 0, 10, 20, 30], dtype='Int64'), + 'A': integer_array([-10, np.nan, 0, 10, 20, 30], dtype='Int64'), 'B': Categorical(list('abcabc')), - 'C': IntegerArray([0, 1, 2, 3, np.nan, 5], dtype='UInt8'), + 
'C': integer_array([0, 1, 2, 3, np.nan, 5], dtype='UInt8'), 'D': IntervalArray.from_breaks(range(7))}) result = df._get_numeric_data() expected = df.loc[:, ['A', 'C']]
This reverts commit 52e186af3e0fed9415a2db9e0c8d7d75d1267a54. xref https://github.com/pandas-dev/pandas/issues/22427 and https://github.com/pandas-dev/pandas/pull/22424
https://api.github.com/repos/pandas-dev/pandas/pulls/22429
2018-08-20T13:09:47Z
2018-08-20T18:20:29Z
2018-08-20T18:20:29Z
2018-08-20T18:20:32Z
Backport PR #22424 on branch 0.23.x
diff --git a/ci/travis-36-doc.yaml b/ci/travis-36-doc.yaml index c22dddbe0ba3f..8705b82412e7c 100644 --- a/ci/travis-36-doc.yaml +++ b/ci/travis-36-doc.yaml @@ -36,6 +36,7 @@ dependencies: - sphinx - sqlalchemy - statsmodels + - tzlocal - xarray - xlrd - xlsxwriter
Backport PR #22424: CI: add missing tzlocal dependency (rpy2, doc build)
https://api.github.com/repos/pandas-dev/pandas/pulls/22425
2018-08-20T05:59:23Z
2018-08-20T11:04:29Z
2018-08-20T11:04:29Z
2018-08-20T11:04:29Z
CI: add missing tzlocal dependency (rpy2, doc build)
diff --git a/ci/travis-36-doc.yaml b/ci/travis-36-doc.yaml index 153a81197a6c7..abb0426dbe08e 100644 --- a/ci/travis-36-doc.yaml +++ b/ci/travis-36-doc.yaml @@ -36,6 +36,7 @@ dependencies: - sphinx - sqlalchemy - statsmodels + - tzlocal - xarray - xlrd - xlsxwriter
- [x] closes #22412 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/22424
2018-08-19T22:27:36Z
2018-08-20T05:58:42Z
2018-08-20T05:58:42Z
2018-08-20T05:58:53Z