title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
ENH: Implement "standard error of the mean"
diff --git a/doc/source/api.rst b/doc/source/api.rst index c037dfa8d7acf..bc257ffa0ad6c 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -350,6 +350,7 @@ Computations / Descriptive Stats Series.prod Series.quantile Series.rank + Series.sem Series.skew Series.std Series.sum @@ -642,6 +643,7 @@ Computations / Descriptive Stats DataFrame.prod DataFrame.quantile DataFrame.rank + DataFrame.sem DataFrame.skew DataFrame.sum DataFrame.std @@ -895,6 +897,7 @@ Computations / Descriptive Stats Panel.min Panel.pct_change Panel.prod + Panel.sem Panel.skew Panel.sum Panel.std @@ -1222,6 +1225,7 @@ Computations / Descriptive Stats GroupBy.mean GroupBy.median + GroupBy.sem GroupBy.std GroupBy.var GroupBy.ohlc diff --git a/doc/source/basics.rst b/doc/source/basics.rst index dd1ea5678698d..f614e1b7edcf4 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -428,6 +428,7 @@ optional ``level`` parameter which applies only if the object has a ``prod``, Product of values ``std``, Unbiased standard deviation ``var``, Unbiased variance + ``sem``, Unbiased standard error of the mean ``skew``, Unbiased skewness (3rd moment) ``kurt``, Unbiased kurtosis (4th moment) ``quantile``, Sample quantile (value at %) diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index cad557756f897..c0db87d58ef08 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -397,7 +397,7 @@ index are the group names and whose values are the sizes of each group. named *columns*. Aggregating functions are ones that reduce the dimension of the returned objects, - for example: ``mean, sum, size, count, std, var, describe, first, last, nth, min, max``. This is + for example: ``mean, sum, size, count, std, var, sem, describe, first, last, nth, min, max``. This is what happens when you do for example ``DataFrame.sum()`` and get back a ``Series``. 
``nth`` can act as a reducer *or* a filter, see :ref:`here <groupby.nth>` @@ -457,7 +457,7 @@ must be either implemented on GroupBy or available via :ref:`dispatching Cython-optimized aggregation functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Some common aggregations, currently only ``sum``, ``mean``, and ``std``, have +Some common aggregations, currently only ``sum``, ``mean``, ``std``, and ``sem``, have optimized Cython implementations: .. ipython:: python diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 48acacd7ced08..53efe061e218f 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -974,8 +974,8 @@ an array and produces aggregated values: ts.resample('5Min', how=np.max) Any function available via :ref:`dispatching <groupby.dispatch>` can be given to -the ``how`` parameter by name, including ``sum``, ``mean``, ``std``, ``max``, -``min``, ``median``, ``first``, ``last``, ``ohlc``. +the ``how`` parameter by name, including ``sum``, ``mean``, ``std``, ``sem``, +``max``, ``min``, ``median``, ``first``, ``last``, ``ohlc``. For downsampling, ``closed`` can be set to 'left' or 'right' to specify which end of the interval is closed: diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index ff35ce9ca3069..f83cd50bbd8c5 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -73,6 +73,9 @@ Enhancements See :ref:`the docs <timeseries.timezone>`. +- Implemented ``sem`` (standard error of the mean) operation for ``Series``, + ``DataFrame``, ``Panel``, and ``Groupby`` (:issue:`6897`) + .. 
_whatsnew_0141.performance: Performance diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ed0d92683ad54..4500a9181f5d9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3794,7 +3794,8 @@ def mad(self, axis=None, skipna=None, level=None, **kwargs): @Substitution(outname='variance', desc="Return unbiased variance over requested " - "axis\nNormalized by N-1") + "axis.\n\nNormalized by N-1 by default. " + "This can be changed using the ddof argument") @Appender(_num_doc) def var(self, axis=None, skipna=None, level=None, ddof=1, **kwargs): if skipna is None: @@ -3811,7 +3812,8 @@ def var(self, axis=None, skipna=None, level=None, ddof=1, **kwargs): @Substitution(outname='stdev', desc="Return unbiased standard deviation over requested " - "axis\nNormalized by N-1") + "axis.\n\nNormalized by N-1 by default. " + "This can be changed using the ddof argument") @Appender(_num_doc) def std(self, axis=None, skipna=None, level=None, ddof=1, **kwargs): if skipna is None: @@ -3827,6 +3829,24 @@ def std(self, axis=None, skipna=None, level=None, ddof=1, **kwargs): return np.sqrt(result) cls.std = std + @Substitution(outname='standarderror', + desc="Return unbiased standard error of the mean over " + "requested axis.\n\nNormalized by N-1 by default. 
" + "This can be changed using the ddof argument") + @Appender(_num_doc) + def sem(self, axis=None, skipna=None, level=None, ddof=1, **kwargs): + if skipna is None: + skipna = True + if axis is None: + axis = self._stat_axis_number + if level is not None: + return self._agg_by_level('sem', axis=axis, level=level, + skipna=skipna, ddof=ddof) + + return self._reduce(nanops.nansem, axis=axis, skipna=skipna, + ddof=ddof) + cls.sem = sem + @Substitution(outname='compounded', desc="Return the compound percentage of the values for " "the requested axis") diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 1b07e2fb0aeab..2714e9f22cd95 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -486,7 +486,7 @@ def __getattr__(self, attr): (type(self).__name__, attr)) def __getitem__(self, key): - raise NotImplementedError + raise NotImplementedError('Not implemented: %s' % key) def _make_wrapper(self, name): if name not in self._apply_whitelist: @@ -695,12 +695,7 @@ def std(self, ddof=1): For multiple groupings, the result index will be a MultiIndex """ # todo, implement at cython level? 
- if ddof == 1: - return self._cython_agg_general('std') - else: - self._set_selection_from_grouper() - f = lambda x: x.std(ddof=ddof) - return self._python_agg_general(f) + return np.sqrt(self.var(ddof=ddof)) def var(self, ddof=1): """ @@ -715,6 +710,14 @@ def var(self, ddof=1): f = lambda x: x.var(ddof=ddof) return self._python_agg_general(f) + def sem(self, ddof=1): + """ + Compute standard error of the mean of groups, excluding missing values + + For multiple groupings, the result index will be a MultiIndex + """ + return self.std(ddof=ddof)/np.sqrt(self.count()) + def size(self): """ Compute group sizes @@ -1332,7 +1335,6 @@ def get_group_levels(self): 'name': 'group_median' }, 'var': 'group_var', - 'std': 'group_var', 'first': { 'name': 'group_nth', 'f': lambda func, a, b, c, d: func(a, b, c, d, 1) @@ -1341,10 +1343,6 @@ def get_group_levels(self): 'count': 'group_count', } - _cython_transforms = { - 'std': np.sqrt, - } - _cython_arity = { 'ohlc': 4, # OHLC } @@ -1455,7 +1453,6 @@ def aggregate(self, values, how, axis=0): def _aggregate(self, result, counts, values, how, is_numeric): agg_func, dtype = self._get_aggregate_function(how, values) - trans_func = self._cython_transforms.get(how, lambda x: x) comp_ids, _, ngroups = self.group_info if values.ndim > 3: @@ -1469,7 +1466,7 @@ def _aggregate(self, result, counts, values, how, is_numeric): else: agg_func(result, counts, values, comp_ids) - return trans_func(result) + return result def agg_series(self, obj, func): try: @@ -1669,7 +1666,6 @@ def names(self): 'min': 'group_min_bin', 'max': 'group_max_bin', 'var': 'group_var_bin', - 'std': 'group_var_bin', 'ohlc': 'group_ohlc', 'first': { 'name': 'group_nth_bin', @@ -1688,7 +1684,6 @@ def names(self): def _aggregate(self, result, counts, values, how, is_numeric=True): agg_func, dtype = self._get_aggregate_function(how, values) - trans_func = self._cython_transforms.get(how, lambda x: x) if values.ndim > 3: # punting for now @@ -1699,7 +1694,7 @@ def 
_aggregate(self, result, counts, values, how, is_numeric=True): else: agg_func(result, counts, values, self.bins) - return trans_func(result) + return result def agg_series(self, obj, func): dummy = obj[:0] diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 46d2358768384..b40334c1857ac 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -308,6 +308,24 @@ def get_median(x): return _wrap_results(get_median(values), dtype) if notempty else np.nan +def _get_counts_nanvar(mask, axis, ddof): + count = _get_counts(mask, axis) + + d = count-ddof + + # always return NaN, never inf + if np.isscalar(count): + if count <= ddof: + count = np.nan + d = np.nan + else: + mask2 = count <= ddof + if mask2.any(): + np.putmask(d, mask2, np.nan) + np.putmask(count, mask2, np.nan) + return count, d + + @disallow('M8') @bottleneck_switch(ddof=1) def nanvar(values, axis=None, skipna=True, ddof=1): @@ -316,31 +334,28 @@ def nanvar(values, axis=None, skipna=True, ddof=1): mask = isnull(values) - if axis is not None: - count = (values.shape[axis] - mask.sum(axis)).astype(float) - else: - count = float(values.size - mask.sum()) + count, d = _get_counts_nanvar(mask, axis, ddof) - d = count-ddof if skipna: values = values.copy() np.putmask(values, mask, 0) - # always return NaN, never inf - if np.isscalar(count): - if count <= ddof: - count = np.nan - d = np.nan - else: - mask = count <= ddof - if mask.any(): - np.putmask(d, mask, np.nan) - np.putmask(count, mask, np.nan) - X = _ensure_numeric(values.sum(axis)) XX = _ensure_numeric((values ** 2).sum(axis)) return np.fabs((XX - X ** 2 / count) / d) + +def nansem(values, axis=None, skipna=True, ddof=1): + var = nanvar(values, axis, skipna, ddof=ddof) + + if not isinstance(values.dtype.type, np.floating): + values = values.astype('f8') + mask = isnull(values) + count, _ = _get_counts_nanvar(mask, axis, ddof) + + return np.sqrt(var)/np.sqrt(count) + + @bottleneck_switch() def nanmin(values, axis=None, skipna=True): 
values, mask, dtype, dtype_max = _get_values(values, skipna, fill_value_typ='+inf') diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 358d9d82403f6..c4475715386b9 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -10864,6 +10864,23 @@ def test_var_std(self): self.assertFalse((result < 0).any()) nanops._USE_BOTTLENECK = True + def test_sem(self): + alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x)) + self._check_stat_op('sem', alt) + + result = self.tsframe.sem(ddof=4) + expected = self.tsframe.apply(lambda x: x.std(ddof=4)/np.sqrt(len(x))) + assert_almost_equal(result, expected) + + arr = np.repeat(np.random.random((1, 1000)), 1000, 0) + result = nanops.nansem(arr, axis=0) + self.assertFalse((result < 0).any()) + if nanops._USE_BOTTLENECK: + nanops._USE_BOTTLENECK = False + result = nanops.nansem(arr, axis=0) + self.assertFalse((result < 0).any()) + nanops._USE_BOTTLENECK = True + def test_skew(self): _skip_if_no_scipy() from scipy.stats import skew diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 475f5085d55f5..4aae5dfea3982 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -681,11 +681,14 @@ def _check_results(grouped): assert_frame_equal(result, expected) # group frame by function dict - result = grouped.agg( - OrderedDict([['A', 'var'], ['B', 'std'], ['C', 'mean']])) + result = grouped.agg(OrderedDict([['A', 'var'], + ['B', 'std'], + ['C', 'mean'], + ['D', 'sem']])) expected = DataFrame(OrderedDict([['A', grouped['A'].var()], ['B', grouped['B'].std()], - ['C', grouped['C'].mean()]])) + ['C', grouped['C'].mean()], + ['D', grouped['D'].sem()]])) assert_frame_equal(result, expected) by_weekday = self.tsframe.groupby(lambda x: x.weekday()) @@ -1633,8 +1636,13 @@ def _testit(op): result = op(grouped)['C'] assert_series_equal(result, exp) + _testit(lambda x: x.count()) _testit(lambda x: x.sum()) + _testit(lambda x: x.std()) + _testit(lambda x: x.var()) + 
_testit(lambda x: x.sem()) _testit(lambda x: x.mean()) + _testit(lambda x: x.median()) _testit(lambda x: x.prod()) _testit(lambda x: x.min()) _testit(lambda x: x.max()) @@ -4166,8 +4174,8 @@ def test_tab_completion(self): 'agg','aggregate','apply','boxplot','filter','first','get_group', 'groups','hist','indices','last','max','mean','median', 'min','name','ngroups','nth','ohlc','plot', 'prod', - 'size','std','sum','transform','var', 'count', 'head', 'describe', - 'cummax', 'quantile', 'rank', 'cumprod', 'tail', + 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count', 'head', + 'describe', 'cummax', 'quantile', 'rank', 'cumprod', 'tail', 'resample', 'cummin', 'fillna', 'cumsum', 'cumcount', 'all', 'shift', 'skew', 'bfill', 'irow', 'ffill', 'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith', @@ -4306,6 +4314,61 @@ def __eq__(self, other): name='grp')) tm.assert_frame_equal(result, expected) + def test__cython_agg_general(self): + ops = [('mean', np.mean), + ('median', np.median), + ('var', np.var), + ('add', np.sum), + ('prod', np.prod), + ('min', np.min), + ('max', np.max), + ('first', lambda x: x.iloc[0]), + ('last', lambda x: x.iloc[-1]), + ('count', np.size), + ] + df = DataFrame(np.random.randn(1000)) + labels = np.random.randint(0, 50, size=1000).astype(float) + + for op, targop in ops: + result = df.groupby(labels)._cython_agg_general(op) + expected = df.groupby(labels).agg(targop) + try: + tm.assert_frame_equal(result, expected) + except BaseException as exc: + exc.args += ('operation: %s' % op,) + raise + + def test_ops_general(self): + ops = [('mean', np.mean), + ('median', np.median), + ('std', np.std), + ('var', np.var), + ('sum', np.sum), + ('prod', np.prod), + ('min', np.min), + ('max', np.max), + ('first', lambda x: x.iloc[0]), + ('last', lambda x: x.iloc[-1]), + ('count', np.size), + ] + try: + from scipy.stats import sem + except ImportError: + pass + else: + ops.append(('sem', sem)) + df = DataFrame(np.random.randn(1000)) + labels = 
np.random.randint(0, 50, size=1000).astype(float) + + for op, targop in ops: + result = getattr(df.groupby(labels), op)().astype(float) + expected = df.groupby(labels).agg(targop) + try: + tm.assert_frame_equal(result, expected) + except BaseException as exc: + exc.args += ('operation: %s' % op,) + raise + def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all() diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 07b753b6724d8..b8ccfb3eb151b 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1378,7 +1378,7 @@ def test_count(self): self.assertRaises(KeyError, frame.count, level='x') AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew', - 'mad', 'std', 'var'] + 'mad', 'std', 'var', 'sem'] def test_series_group_min_max(self): for op, level, skipna in cart_product(self.AGG_FUNCTIONS, diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 2ed24832c3270..34ab401eac283 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -127,6 +127,13 @@ def alt(x): return np.std(x, ddof=1) self._check_stat_op('std', alt) + def test_sem(self): + def alt(x): + if len(x) < 2: + return np.nan + return np.std(x, ddof=1)/np.sqrt(len(x)) + self._check_stat_op('sem', alt) + # def test_skew(self): # from scipy.stats import skew diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index 55b6535be9078..7dc5d9bd411fb 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -98,6 +98,13 @@ def alt(x): return np.std(x, ddof=1) self._check_stat_op('std', alt) + def test_sem(self): + def alt(x): + if len(x) < 2: + return np.nan + return np.std(x, ddof=1)/np.sqrt(len(x)) + self._check_stat_op('sem', alt) + # def test_skew(self): # from scipy.stats import skew diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 44587248e6d51..a822b0891edc4 100644 --- a/pandas/tests/test_series.py +++ 
b/pandas/tests/test_series.py @@ -1980,6 +1980,19 @@ def test_var_std(self): result = s.std(ddof=1) self.assertTrue(isnull(result)) + def test_sem(self): + alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x)) + self._check_stat_op('sem', alt) + + result = self.ts.sem(ddof=4) + expected = np.std(self.ts.values, ddof=4)/np.sqrt(len(self.ts.values)) + assert_almost_equal(result, expected) + + # 1 - element series with ddof=1 + s = self.ts.iloc[[0]] + result = s.sem(ddof=1) + self.assert_(isnull(result)) + def test_skew(self): _skip_if_no_scipy() diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 2345738002029..cdf62af1fd90b 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -5,7 +5,8 @@ from pandas.compat import range, lrange, zip, product import numpy as np -from pandas import Series, TimeSeries, DataFrame, Panel, isnull, notnull, Timestamp +from pandas import (Series, TimeSeries, DataFrame, Panel, Index, + isnull, notnull, Timestamp) from pandas.tseries.index import date_range from pandas.tseries.offsets import Minute, BDay @@ -104,6 +105,48 @@ def test_resample_basic(self): expect = s.groupby(grouper).agg(lambda x: x[-1]) assert_series_equal(result, expect) + def test_resample_how(self): + rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', + freq='min', name='index') + s = Series(np.random.randn(14), index=rng) + grouplist = np.ones_like(s) + grouplist[0] = 0 + grouplist[1:6] = 1 + grouplist[6:11] = 2 + grouplist[11:] = 3 + args = ['sum', 'mean', 'std', 'sem', 'max', 'min', + 'median', 'first', 'last', 'ohlc'] + + def _ohlc(group): + if isnull(group).all(): + return np.repeat(np.nan, 4) + return [group[0], group.max(), group.min(), group[-1]] + inds = date_range('1/1/2000', periods=4, freq='5min') + + for arg in args: + if arg == 'ohlc': + func = _ohlc + else: + func = arg + try: + result = s.resample('5min', how=arg, + closed='right', label='right') + + expected = 
s.groupby(grouplist).agg(func) + self.assertEqual(result.index.name, 'index') + if arg == 'ohlc': + expected = DataFrame(expected.values.tolist()) + expected.columns = ['open', 'high', 'low', 'close'] + expected.index = Index(inds, name='index') + assert_frame_equal(result, expected) + else: + expected.index = inds + assert_series_equal(result, expected) + except BaseException as exc: + + exc.args += ('how=%s' % arg,) + raise + def test_resample_basic_from_daily(self): # from daily dti = DatetimeIndex(
closes #6897 As discussed in issue #6897, here is a pull request implemented "standard error of the mean" (sem) calculations in pandas. This is an extremely common but very simple statistical test. It is used pretty universally in scientific data analysis. sem is implemented in nanops, generic, and groupby. Unit tests are included and the docs have been updated. I have also included an improved .gitignore, based on matplotlib's but including everything from the old .gitignore (hopefully better organized now, though).
https://api.github.com/repos/pandas-dev/pandas/pulls/7133
2014-05-15T13:54:42Z
2014-06-05T10:22:55Z
2014-06-05T10:22:55Z
2014-06-13T13:17:11Z
BUG/API: info repr should honor display.max_info_columns
diff --git a/doc/source/release.rst b/doc/source/release.rst index 50c79b8e8a2bf..973dfd73307f2 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -515,6 +515,7 @@ Bug Fixes - Bug in the HTML repr of a truncated Series or DataFrame not showing the class name with the `large_repr` set to 'info' (:issue:`7105`) - Bug in ``DatetimeIndex`` specifying ``freq`` raises ``ValueError`` when passed value is too short (:issue:`7098`) +- Fixed a bug with the `info` repr not honoring the `display.max_info_columns` setting (:issue:`6939`) pandas 0.13.1 ------------- diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 4c98cb685c901..8182bff92fb63 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -210,6 +210,7 @@ API changes # this now raises for arith ops like ``+``, ``*``, etc. NotImplementedError: operator '*' not implemented for bool dtypes + .. _whatsnew_0140.display: Display Changes @@ -239,6 +240,11 @@ Display Changes length of the series (:issue:`7101`) - Fixed a bug in the HTML repr of a truncated Series or DataFrame not showing the class name with the `large_repr` set to 'info' (:issue:`7105`) +- The `verbose` keyword in ``DataFrame.info()``, which controls whether to shorten the ``info`` + representation, is now ``None`` by default. This will follow the global setting in + ``display.max_info_columns``. The global setting can be overriden with ``verbose=True`` or + ``verbose=False``. +- Fixed a bug with the `info` repr not honoring the `display.max_info_columns` setting (:issue:`6939`) .. 
_whatsnew_0140.groupby: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d20cec7aa79ee..1b77a87b0d94a 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1392,17 +1392,20 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, if buf is None: return formatter.buf.getvalue() - def info(self, verbose=True, buf=None, max_cols=None): + def info(self, verbose=None, buf=None, max_cols=None): """ Concise summary of a DataFrame. Parameters ---------- - verbose : boolean, default True - If False, don't print column count summary + verbose : {None, True, False}, optional + Whether to print the full summary. + None follows the `display.max_info_columns` setting. + True or False overrides the `display.max_info_columns` setting. buf : writable buffer, defaults to sys.stdout max_cols : int, default None - Determines whether full summary or short summary is printed + Determines whether full summary or short summary is printed. + None follows the `display.max_info_columns` setting. 
""" from pandas.core.format import _put_lines @@ -1429,8 +1432,10 @@ def info(self, verbose=True, buf=None, max_cols=None): max_rows = get_option('display.max_info_rows', len(self) + 1) show_counts = ((len(self.columns) <= max_cols) and - (len(self) < max_rows)) - if verbose: + (len(self) < max_rows)) + exceeds_info_cols = len(self.columns) > max_cols + + def _verbose_repr(): lines.append('Data columns (total %d columns):' % len(self.columns)) space = max([len(com.pprint_thing(k)) for k in self.columns]) + 4 @@ -1442,22 +1447,33 @@ def info(self, verbose=True, buf=None, max_cols=None): if len(cols) != len(counts): # pragma: no cover raise AssertionError('Columns must equal counts (%d != %d)' % (len(cols), len(counts))) - tmpl = "%s non-null %s" + tmpl = "%s non-null %s" dtypes = self.dtypes for i, col in enumerate(self.columns): dtype = dtypes[col] col = com.pprint_thing(col) - count= "" + count = "" if show_counts: count = counts.iloc[i] lines.append(_put_str(col, space) + tmpl % (count, dtype)) - else: + + def _non_verbose_repr(): lines.append(self.columns.summary(name='Columns')) + if verbose: + _verbose_repr() + elif verbose is False: # specifically set to False, not nesc None + _non_verbose_repr() + else: + if exceeds_info_cols: + _non_verbose_repr() + else: + _verbose_repr() + counts = self.get_dtype_counts() dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))] lines.append('dtypes: %s' % ', '.join(dtypes)) diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index f26ea0755ad46..f61bda686c88b 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -38,6 +38,12 @@ def has_info_repr(df): c2 = r.split('\n')[0].startswith(r"&lt;class") # _repr_html_ return c1 or c2 +def has_non_verbose_info_repr(df): + has_info = has_info_repr(df) + r = repr(df) + nv = len(r.split('\n')) == 4 # 1. <class>, 2. Index, 3. Columns, 4. 
dtype + return has_info and nv + def has_horizontally_truncated_repr(df): r = repr(df) return any(l.strip().endswith('...') for l in r.splitlines()) @@ -1573,6 +1579,22 @@ def test_info_repr(self): with option_context('display.large_repr', 'info'): assert has_info_repr(df) + def test_info_repr_max_cols(self): + # GH #6939 + df = DataFrame(randn(10, 5)) + with option_context('display.large_repr', 'info', + 'display.max_columns', 1, + 'display.max_info_columns', 4): + self.assertTrue(has_non_verbose_info_repr(df)) + + with option_context('display.large_repr', 'info', + 'display.max_columns', 1, + 'display.max_info_columns', 5): + self.assertFalse(has_non_verbose_info_repr(df)) + + # test verbose overrides + # fmt.set_option('display.max_info_columns', 4) # exceeded + def test_info_repr_html(self): max_rows = get_option('display.max_rows') max_cols = get_option('display.max_columns') diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 0c1745b41f089..8266feb112ed2 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -6326,6 +6326,41 @@ def test_info_shows_column_dtypes(self): name = '%d %d non-null %s' % (i, n, dtype) assert name in res + def test_info_max_cols(self): + df = DataFrame(np.random.randn(10, 5)) + for len_, verbose in [(4, None), (4, False), (9, True)]: + # For verbose always ^ setting ^ summarize ^ full output + with pd.option_context('max_info_columns', 4): + buf = StringIO() + df.info(buf=buf, verbose=verbose) + res = buf.getvalue() + self.assertEqual(len(res.split('\n')), len_) + + for len_, verbose in [(9, None), (4, False), (9, True)]: + + # max_cols no exceeded + with pd.option_context('max_info_columns', 5): + buf = StringIO() + df.info(buf=buf, verbose=verbose) + res = buf.getvalue() + self.assertEqual(len(res.split('\n')), len_) + + for len_, max_cols in [(9, 5), (4, 4)]: + # setting truncates + with pd.option_context('max_info_columns', 4): + buf = StringIO() + df.info(buf=buf, max_cols=max_cols) + 
res = buf.getvalue() + self.assertEqual(len(res.split('\n')), len_) + + # setting wouldn't truncate + with pd.option_context('max_info_columns', 5): + buf = StringIO() + df.info(buf=buf, max_cols=max_cols) + res = buf.getvalue() + self.assertEqual(len(res.split('\n')), len_) + + def test_dtypes(self): self.mixed_frame['bool'] = self.mixed_frame['A'] > 0 result = self.mixed_frame.dtypes
Closes https://github.com/pydata/pandas/issues/6939 I think this is what we agreed on. The tests are split up. In `test_format` we do more of the following global settings tests. The rest are in `test_frame` which directly call `df.info()` and I'm basically testing that the kwargs there override the global settings.
https://api.github.com/repos/pandas-dev/pandas/pulls/7130
2014-05-14T18:41:11Z
2014-05-14T19:57:32Z
2014-05-14T19:57:32Z
2016-11-03T12:38:01Z
BUG: cleanup on describe
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9172d174a1354..04ab4fb14d512 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3573,22 +3573,18 @@ def describe_numeric_1d(series, percentiles): [series.max()]) def describe_categorical_1d(data): - if data.dtype == object: - names = ['count', 'unique'] - objcounts = data.value_counts() - result = [data.count(), len(objcounts)] - if result[1] > 0: + names = ['count', 'unique'] + objcounts = data.value_counts() + result = [data.count(), len(objcounts)] + if result[1] > 0: + top, freq = objcounts.index[0], objcounts.iloc[0] + + if data.dtype == object: names += ['top', 'freq'] - top, freq = objcounts.index[0], objcounts.iloc[0] result += [top, freq] - elif issubclass(data.dtype.type, np.datetime64): - names = ['count', 'unique'] - asint = data.dropna().values.view('i8') - objcounts = compat.Counter(asint) - result = [data.count(), len(objcounts)] - if result[1] > 0: - top, freq = objcounts.most_common(1)[0] + elif issubclass(data.dtype.type, np.datetime64): + asint = data.dropna().values.view('i8') names += ['first', 'last', 'top', 'freq'] result += [lib.Timestamp(asint.min()), lib.Timestamp(asint.max()), diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 57ec9d0eb8981..148eecbd8dbaf 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -988,30 +988,31 @@ def test_describe_objects(self): assert_frame_equal(result, expected) df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')}) + df.loc[4] = pd.Timestamp('2010-01-04') result = df.describe() - expected = DataFrame({"C1": [4, 4, pd.Timestamp('2010-01-01'), + expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-01'), pd.Timestamp('2010-01-04'), - pd.Timestamp('2010-01-01'), 1]}, + pd.Timestamp('2010-01-04'), 2]}, index=['count', 'unique', 'first', 'last', 'top', 'freq']) assert_frame_equal(result, expected) # mix time and str - df['C2'] = ['a', 'a', 'b', 'c'] + 
df['C2'] = ['a', 'a', 'b', 'c', 'a'] result = df.describe() # when mix of dateimte / obj the index gets reordered. - expected['C2'] = [4, 3, np.nan, np.nan, 'a', 2] + expected['C2'] = [5, 3, np.nan, np.nan, 'a', 3] assert_frame_equal(result, expected) # just str - expected = DataFrame({'C2': [4, 3, 'a', 2]}, + expected = DataFrame({'C2': [5, 3, 'a', 4]}, index=['count', 'unique', 'top', 'freq']) result = df[['C2']].describe() # mix of time, str, numeric - df['C3'] = [2, 4, 6, 8] + df['C3'] = [2, 4, 6, 8, 2] result = df.describe() - expected = DataFrame({"C3": [4., 5., 2.5819889, 2., 3.5, 5., 6.5, 8.]}, + expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]}, index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']) assert_frame_equal(result, expected)
use value_counts instead of `Counter` and make the tests unambiguous. Came up in https://github.com/pydata/pandas/pull/7088#issuecomment-43034004
https://api.github.com/repos/pandas-dev/pandas/pulls/7129
2014-05-14T17:28:38Z
2014-05-14T18:22:41Z
2014-05-14T18:22:41Z
2016-11-03T12:37:59Z
Changed the link to my entries in the cookbook
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index fa7876c3e28a2..3e2cd654144c9 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -405,10 +405,10 @@ using that handle to read. <http://github.com/pydata/pandas/issues/2886>`__ `Dealing with bad lines II -<http://nipunbatra.wordpress.com/2013/06/06/reading-unclean-data-csv-using-pandas/>`__ +<http://nipunbatra.github.io/2013/06/reading-unclean-data-csv-using-pandas/>`__ `Reading CSV with Unix timestamps and converting to local timezone -<http://nipunbatra.wordpress.com/2013/06/07/pandas-reading-csv-with-unix-timestamps-and-converting-to-local-timezone/>`__ +<http://nipunbatra.github.io/2013/06/pandas-reading-csv-with-unix-timestamps-and-converting-to-local-timezone/>`__ `Write a multi-row index CSV without writing duplicates <http://stackoverflow.com/questions/17349574/pandas-write-multiindex-rows-with-to-csv>`__
Updated my blog location where I had kept these entries.
https://api.github.com/repos/pandas-dev/pandas/pulls/7125
2014-05-14T09:17:59Z
2014-05-14T12:50:22Z
2014-05-14T12:50:22Z
2014-07-09T23:34:46Z
API: change default for show_dimensions to 'truncate', related (GH7108, GH6547)
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 48665bc98ab6c..f8c09acaef1fb 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -249,7 +249,7 @@ def mpl_style_cb(key): cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc, validator=is_text) cf.register_option('expand_frame_repr', True, pc_expand_repr_doc) - cf.register_option('show_dimensions', True, pc_show_dimensions_doc, + cf.register_option('show_dimensions', 'truncate', pc_show_dimensions_doc, validator=is_one_of_factory([True, False, 'truncate'])) cf.register_option('chop_threshold', None, pc_chop_threshold_doc) cf.register_option('max_seq_items', 100, pc_max_seq_items) diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 75bd03a71466e..f26ea0755ad46 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -141,16 +141,16 @@ def test_repr_truncation(self): def test_repr_chop_threshold(self): df = DataFrame([[0.1, 0.5],[0.5, -0.1]]) pd.reset_option("display.chop_threshold") # default None - self.assertEqual(repr(df), ' 0 1\n0 0.1 0.5\n1 0.5 -0.1\n\n[2 rows x 2 columns]') + self.assertEqual(repr(df), ' 0 1\n0 0.1 0.5\n1 0.5 -0.1') with option_context("display.chop_threshold", 0.2 ): - self.assertEqual(repr(df), ' 0 1\n0 0.0 0.5\n1 0.5 0.0\n\n[2 rows x 2 columns]') + self.assertEqual(repr(df), ' 0 1\n0 0.0 0.5\n1 0.5 0.0') with option_context("display.chop_threshold", 0.6 ): - self.assertEqual(repr(df), ' 0 1\n0 0 0\n1 0 0\n\n[2 rows x 2 columns]') + self.assertEqual(repr(df), ' 0 1\n0 0 0\n1 0 0') with option_context("display.chop_threshold", None ): - self.assertEqual(repr(df), ' 0 1\n0 0.1 0.5\n1 0.5 -0.1\n\n[2 rows x 2 columns]') + self.assertEqual(repr(df), ' 0 1\n0 0.1 0.5\n1 0.5 -0.1') def test_repr_obeys_max_seq_limit(self): import pandas.core.common as com @@ -197,7 +197,8 @@ def test_expand_frame_repr(self): with option_context('mode.sim_interactive', True): with 
option_context('display.max_columns', 10, 'display.width',20, - 'display.max_rows', 20): + 'display.max_rows', 20, + 'display.show_dimensions', True): with option_context('display.expand_frame_repr', True): self.assertFalse(has_truncated_repr(df_small)) self.assertFalse(has_expanded_repr(df_small)) @@ -789,7 +790,7 @@ def test_pprint_thing(self): self.assertTrue(not "\t" in pp_t("a\tb", escape_chars=("\t",))) def test_wide_repr(self): - with option_context('mode.sim_interactive', True): + with option_context('mode.sim_interactive', True, 'display.show_dimensions', True): col = lambda l, k: [tm.rands(k) for _ in range(l)] max_cols = get_option('display.max_columns') df = DataFrame([col(max_cols - 1, 25) for _ in range(10)]) @@ -812,7 +813,7 @@ def test_wide_repr_wide_columns(self): df = DataFrame(randn(5, 3), columns=['a' * 90, 'b' * 90, 'c' * 90]) rep_str = repr(df) - self.assertEqual(len(rep_str.splitlines()), 22) + self.assertEqual(len(rep_str.splitlines()), 20) def test_wide_repr_named(self): with option_context('mode.sim_interactive', True): @@ -1458,6 +1459,7 @@ def test_repr_html(self): self.reset_display_options() df = DataFrame([[1, 2], [3, 4]]) + fmt.set_option('display.show_dimensions', True) self.assertTrue('2 rows' in df._repr_html_()) fmt.set_option('display.show_dimensions', False) self.assertFalse('2 rows' in df._repr_html_()) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 4264f5b7e0931..0c1745b41f089 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -4354,12 +4354,14 @@ def test_repr(self): def test_repr_dimensions(self): df = DataFrame([[1, 2,], [3, 4]]) - self.assertTrue("2 rows x 2 columns" in repr(df)) + with pd.option_context('display.show_dimensions', True): + self.assertTrue("2 rows x 2 columns" in repr(df)) - fmt.set_option('display.show_dimensions', False) - self.assertFalse("2 rows x 2 columns" in repr(df)) + with pd.option_context('display.show_dimensions', False): + 
self.assertFalse("2 rows x 2 columns" in repr(df)) - self.reset_display_options() + with pd.option_context('display.show_dimensions', 'truncate'): + self.assertFalse("2 rows x 2 columns" in repr(df)) @slow def test_repr_big(self):
xref: #7108 xref: #6547
https://api.github.com/repos/pandas-dev/pandas/pulls/7122
2014-05-14T01:02:39Z
2014-05-14T01:34:28Z
2014-05-14T01:34:28Z
2014-06-25T17:33:33Z
SQL: add release notes for refactor (GH6292)
diff --git a/doc/source/io.rst b/doc/source/io.rst index 9fdf26172cab2..8b4e450ef80c7 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -3160,7 +3160,8 @@ your database. .. versionadded:: 0.14.0 If SQLAlchemy is not installed, a fallback is only provided for sqlite (and -for mysql for backwards compatibility, but this is deprecated). +for mysql for backwards compatibility, but this is deprecated and will be +removed in a future version). This mode requires a Python database adapter which respect the `Python DB-API <http://www.python.org/dev/peps/pep-0249/>`__. @@ -3190,14 +3191,12 @@ engine. You can use a temporary SQLite database where data are stored in To connect with SQLAlchemy you use the :func:`create_engine` function to create an engine object from database URI. You only need to create the engine once per database you are connecting to. - For more information on :func:`create_engine` and the URI formatting, see the examples below and the SQLAlchemy `documentation <http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html>`__ .. ipython:: python from sqlalchemy import create_engine - from pandas.io import sql # Create your connection. engine = create_engine('sqlite:///:memory:') @@ -3280,8 +3279,6 @@ to pass to :func:`pandas.to_datetime`: You can check if a table exists using :func:`~pandas.io.sql.has_table` -In addition, the class :class:`~pandas.io.sql.PandasSQLWithEngine` can be -instantiated directly for more manual control over the SQL interaction. Querying ~~~~~~~~ @@ -3310,18 +3307,18 @@ variant appropriate for your database. .. code-block:: python + from pandas.io import sql sql.execute('SELECT * FROM table_name', engine) - sql.execute('INSERT INTO table_name VALUES(?, ?, ?)', engine, params=[('id', 1, 12.2, True)]) -In addition, the class :class:`~pandas.io.sql.PandasSQLWithEngine` can be -instantiated directly for more manual control over the SQL interaction. 
- - Engine connection examples ~~~~~~~~~~~~~~~~~~~~~~~~~~ +To connect with SQLAlchemy you use the :func:`create_engine` function to create an engine +object from database URI. You only need to create the engine once per database you are +connecting to. + .. code-block:: python from sqlalchemy import create_engine @@ -3341,6 +3338,8 @@ Engine connection examples # or absolute, starting with a slash: engine = create_engine('sqlite:////absolute/path/to/foo.db') +For more information see the examples the SQLAlchemy `documentation <http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html>`__ + Sqlite fallback ~~~~~~~~~~~~~~~ @@ -3354,16 +3353,14 @@ You can create connections like so: .. code-block:: python import sqlite3 - from pandas.io import sql - cnx = sqlite3.connect(':memory:') + con = sqlite3.connect(':memory:') And then issue the following queries: .. code-block:: python data.to_sql('data', cnx) - - sql.read_sql("SELECT * FROM data", cnx) + pd.read_sql_query("SELECT * FROM data", con) .. _io.bigquery: diff --git a/doc/source/release.rst b/doc/source/release.rst index 739d8ba46ec4e..01468e35a037c 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -66,6 +66,8 @@ New features in pandas the actual range of dates that you can use is 1678 AD to 2262 AD. (:issue:`4041`) - Added error bar support to the ``.plot`` method of ``DataFrame`` and ``Series`` (:issue:`3796`, :issue:`6834`) - Implemented ``Panel.pct_change`` (:issue:`6904`) +- The SQL reading and writing functions now support more database flavors + through SQLAlchemy (:issue:`2717`, :issue:`4163`, :issue:`5950`, :issue:`6292`). API Changes ~~~~~~~~~~~ @@ -257,6 +259,8 @@ Deprecations - The support for the 'mysql' flavor when using DBAPI connection objects has been deprecated. MySQL will be further supported with SQLAlchemy engines (:issue:`6900`). +- The following ``io.sql`` functions have been deprecated: ``tquery``, ``uquery``, ``read_frame``, ``frame_query``, ``write_frame``. 
+ - The `percentile_width` keyword argument in :meth:`~DataFrame.describe` has been deprecated. Use the `percentiles` keyword instead, which takes a list of percentiles to display. The default output is unchanged. diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 69da6d52d21ff..5f9a2711114c2 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -340,6 +340,71 @@ More consistent behaviour for some groupby methods: SQL ~~~ +The SQL reading and writing functions now support more database flavors +through SQLAlchemy (:issue:`2717`, :issue:`4163`, :issue:`5950`, :issue:`6292`). +All databases supported by SQLAlchemy can be used, such +as PostgreSQL, MySQL, Oracle, Microsoft SQL server (see documentation of +SQLAlchemy on `included dialects +<http://sqlalchemy.readthedocs.org/en/latest/dialects/index.html>`_). + +The functionality of providing DBAPI connection objects will only be supported +for sqlite3 in the future. The ``'mysql'`` flavor is deprecated. + +The new functions :func:`~pandas.read_sql_query` and :func:`~pandas.read_sql_table` +are introduced. The function :func:`~pandas.read_sql` is kept as a convenience +wrapper around the other two and will delegate to specific function depending on +the provided input (database table name or sql query). + +In practice, you have to provide a SQLAlchemy ``engine`` to the sql functions. +To connect with SQLAlchemy you use the :func:`create_engine` function to create an engine +object from database URI. You only need to create the engine once per database you are +connecting to. For an in-memory sqlite database: + +.. ipython:: python + + from sqlalchemy import create_engine + # Create your connection. + engine = create_engine('sqlite:///:memory:') + +This ``engine`` can then be used to write or read data to/from this database: + +.. 
ipython:: python + + df = pd.DataFrame({'A': [1,2,3], 'B': ['a', 'b', 'c']}) + df.to_sql('db_table', engine, index=False) + +You can read data from a database by specifying the table name: + +.. ipython:: python + + pd.read_sql_table('db_table', engine) + +or by specifying a sql query: + +.. ipython:: python + + pd.read_sql_query('SELECT * FROM db_table', engine) + +Some other enhancements to the sql functions include: + +- support for writing the index. This can be controlled with the ``index`` + keyword (default is True). +- specify the column label to use when writing the index with ``index_label``. +- specify string columns to parse as datetimes withh the ``parse_dates`` + keyword in :func:`~pandas.read_sql_query` and :func:`~pandas.read_sql_table`. + +.. warning:: + + Some of the existing functions or function aliases have been deprecated + and will be removed in future versions. This includes: ``tquery``, ``uquery``, + ``read_frame``, ``frame_query``, ``write_frame``. + +.. warning:: + + The support for the 'mysql' flavor when using DBAPI connection objects has been deprecated. + MySQL will be further supported with SQLAlchemy engines (:issue:`6900`). + + .. _whatsnew_0140.slicers: MultiIndexing Using Slicers @@ -573,6 +638,8 @@ Deprecations - The support for the 'mysql' flavor when using DBAPI connection objects has been deprecated. MySQL will be further supported with SQLAlchemy engines (:issue:`6900`). + - The following ``io.sql`` functions have been deprecated: ``tquery``, ``uquery``, ``read_frame``, ``frame_query``, ``write_frame``. + - The `percentile_width` keyword argument in :meth:`~DataFrame.describe` has been deprecated. Use the `percentiles` keyword instead, which takes a list of percentiles to display. The default output is unchanged. 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 04ab4fb14d512..0e5ca6afdb56b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -928,10 +928,11 @@ def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True, con : SQLAlchemy engine or DBAPI2 connection (legacy mode) Using SQLAlchemy makes it possible to use any DB supported by that library. - If a DBAPI2 object is given, a supported SQL flavor must also be provided + If a DBAPI2 object, only sqlite3 is supported. flavor : {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored when using SQLAlchemy engine. - Required when using DBAPI2 connection. + 'mysql' is deprecated and will be removed in future versions, but it + will be further supported through SQLAlchemy engines. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 347370eaee92f..aa08c95c4f1c3 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -221,8 +221,8 @@ def uquery(sql, con=None, cur=None, retry=True, params=None): #------------------------------------------------------------------------------ #--- Read and write to DataFrames -def read_sql_table(table_name, con, meta=None, index_col=None, - coerce_float=True, parse_dates=None, columns=None): +def read_sql_table(table_name, con, index_col=None, coerce_float=True, + parse_dates=None, columns=None): """Read SQL database table into a DataFrame. Given a table name and an SQLAlchemy engine, returns a DataFrame. 
@@ -234,8 +234,6 @@ def read_sql_table(table_name, con, meta=None, index_col=None, Name of SQL table in database con : SQLAlchemy engine Sqlite DBAPI conncection mode not supported - meta : SQLAlchemy meta, optional - If omitted MetaData is reflected from engine index_col : string, optional Column to set as index coerce_float : boolean, default True @@ -264,7 +262,7 @@ def read_sql_table(table_name, con, meta=None, index_col=None, """ - pandas_sql = PandasSQLAlchemy(con, meta=meta) + pandas_sql = PandasSQLAlchemy(con) table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) @@ -292,11 +290,10 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, library. If a DBAPI2 object, only sqlite3 is supported. index_col : string, optional - column name to use for the returned DataFrame object. + Column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets - cur : depreciated, cursor is obtained from connection params : list, tuple or dict, optional List of parameters to pass to execute method. parse_dates : list or dict @@ -325,8 +322,8 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=parse_dates) -def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, - params=None, parse_dates=None, columns=None): +def read_sql(sql, con, index_col=None, coerce_float=True, params=None, + parse_dates=None, columns=None): """ Read SQL query or database table into a DataFrame. @@ -339,15 +336,10 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, library. If a DBAPI2 object, only sqlite3 is supported. index_col : string, optional - column name to use for the returned DataFrame object. 
- flavor : string, {'sqlite', 'mysql'} - The flavor of SQL to use. Ignored when using - SQLAlchemy engine. Required when using DBAPI2 connection. - 'mysql' is still supported, but will be removed in future versions. + column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets - cur : depreciated, cursor is obtained from connection params : list, tuple or dict, optional List of parameters to pass to execute method. parse_dates : list or dict @@ -360,7 +352,8 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, Especially useful with databases without native Datetime support, such as SQLite columns : list - List of column names to select from sql table + List of column names to select from sql table (only used when reading + a table). Returns ------- @@ -379,7 +372,7 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, read_sql_query : Read SQL query into a DataFrame """ - pandas_sql = pandasSQL_builder(con, flavor=flavor) + pandas_sql = pandasSQL_builder(con) if 'select' in sql.lower(): try: @@ -419,8 +412,8 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, If a DBAPI2 object, only sqlite3 is supported. flavor : {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored when using SQLAlchemy engine. - Required when using DBAPI2 connection. - 'mysql' is still supported, but will be removed in future versions. + 'mysql' is deprecated and will be removed in future versions, but it + will be further supported through SQLAlchemy engines. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. @@ -461,8 +454,8 @@ def has_table(table_name, con, flavor='sqlite'): If a DBAPI2 object, only sqlite3 is supported. 
flavor: {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored when using SQLAlchemy engine. - Required when using DBAPI2 connection. - 'mysql' is still supported, but will be removed in future versions. + 'mysql' is deprecated and will be removed in future versions, but it + will be further supported through SQLAlchemy engines. Returns ------- @@ -1090,15 +1083,23 @@ def _create_sql_schema(self, frame, table_name): def get_schema(frame, name, flavor='sqlite', keys=None, con=None): """ - Get the SQL db table schema for the given frame + Get the SQL db table schema for the given frame. Parameters ---------- frame : DataFrame - name : name of SQL table + name : string + name of SQL table flavor : {'sqlite', 'mysql'}, default 'sqlite' - keys : columns to use a primary key + The flavor of SQL to use. Ignored when using SQLAlchemy engine. + 'mysql' is deprecated and will be removed in future versions, but it + will be further supported through SQLAlchemy engines. + keys : string or sequence + columns to use a primary key con: an open SQL database connection object or an SQLAlchemy engine + Using SQLAlchemy makes it possible to use any DB supported by that + library. + If a DBAPI2 object, only sqlite3 is supported. """ diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 6058c0923e3c1..a47feceb7f233 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -340,7 +340,7 @@ def test_read_sql_iris(self): def test_legacy_read_frame(self): with tm.assert_produces_warning(FutureWarning): iris_frame = sql.read_frame( - "SELECT * FROM iris", self.conn, flavor='sqlite') + "SELECT * FROM iris", self.conn) self._check_iris_loaded_frame(iris_frame) def test_to_sql(self):
This should (finally) close #6292. WIP for now, but comments welcome. Have to add to release.rst. @jreback Should I mark the new sql as experimental? (and if so, where should I do this?)
https://api.github.com/repos/pandas-dev/pandas/pulls/7120
2014-05-13T20:39:16Z
2014-05-17T00:04:46Z
2014-05-17T00:04:45Z
2014-06-18T01:56:14Z
TST: numpy 1.9-dev exception changes in datetime64 indexing (GH7116)
diff --git a/pandas/__init__.py b/pandas/__init__.py index 442c6b08c9dce..6eda049835526 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -30,6 +30,7 @@ _np_version_under1p6 = LooseVersion(_np_version) < '1.6' _np_version_under1p7 = LooseVersion(_np_version) < '1.7' _np_version_under1p8 = LooseVersion(_np_version) < '1.8' +_np_version_under1p9 = LooseVersion(_np_version) < '1.9' from pandas.version import version as __version__ from pandas.info import __doc__ diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 419ab48a01a07..8f8ce897f6c6d 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -23,7 +23,7 @@ from numpy.random import randn from pandas.compat import range, lrange, lmap, zip -from pandas import Series, TimeSeries, DataFrame +from pandas import Series, TimeSeries, DataFrame, _np_version_under1p9 from pandas.util.testing import(assert_series_equal, assert_almost_equal, assertRaisesRegexp) import pandas.util.testing as tm @@ -1862,8 +1862,17 @@ def test_getitem_day(self): values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H', '2013/02/01 09:00'] for v in values: - with tm.assertRaises(ValueError): - idx[v] + + if _np_version_under1p9: + with tm.assertRaises(ValueError): + idx[v] + else: + # GH7116 + # these show deprecations as we are trying + # to slice with non-integer indexers + #with tm.assertRaises(IndexError): + # idx[v] + continue s = Series(np.random.rand(len(idx)), index=idx) assert_series_equal(s['2013/01'], s[0:31]) @@ -1910,8 +1919,16 @@ def test_getitem_seconds(self): values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H', '2013/02/01 09:00'] for v in values: - with tm.assertRaises(ValueError): - idx[v] + if _np_version_under1p9: + with tm.assertRaises(ValueError): + idx[v] + else: + # GH7116 + # these show deprecations as we are trying + # to slice with non-integer indexers + #with tm.assertRaises(IndexError): + # idx[v] + continue s = 
Series(np.random.rand(len(idx)), index=idx) @@ -2291,7 +2308,7 @@ def test_factorize(self): exp_arr = np.array([0, 0, 1, 1, 2, 2]) exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M') - + arr, idx = idx1.factorize() self.assert_numpy_array_equal(arr, exp_arr) self.assert_(idx.equals(exp_idx)) @@ -2303,7 +2320,7 @@ def test_factorize(self): idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01', '2014-03', '2014-01'], freq='M') - exp_arr = np.array([2, 2, 1, 0, 2, 0]) + exp_arr = np.array([2, 2, 1, 0, 2, 0]) arr, idx = idx2.factorize(sort=True) self.assert_numpy_array_equal(arr, exp_arr) self.assert_(idx.equals(exp_idx))
closes #7116
https://api.github.com/repos/pandas-dev/pandas/pulls/7118
2014-05-13T17:47:22Z
2014-05-13T22:20:39Z
2014-05-13T22:20:39Z
2014-06-28T11:19:11Z
BUG: Regression in the display of a MultiIndexed Series with display.max_rows (GH7101)
diff --git a/doc/source/release.rst b/doc/source/release.rst index b12f4eca010d9..946bdcd48cfaf 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -337,6 +337,8 @@ Improvements to existing features - ``GroupBy.count()`` is now implemented in Cython and is much faster for large numbers of groups (:issue:`7016`). - ``boxplot`` now supports ``layout`` keyword (:issue:`6769`) +- Regression in the display of a MultiIndexed Series with ``display.max_rows`` is less than the + length of the series (:issue:`7101`) .. _release.bug_fixes-0.14.0: diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index ca8e714bdbef8..725033ebcb82b 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -202,6 +202,9 @@ API changes Display Changes ~~~~~~~~~~~~~~~ +- Regression in the display of a MultiIndexed Series with ``display.max_rows`` is less than the + length of the series (:issue:`7101`) + .. _whatsnew_0140.groupby: Groupby API Changes diff --git a/pandas/core/series.py b/pandas/core/series.py index 23d267601b3a2..d4b6039cd375e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -848,7 +848,11 @@ def _tidy_repr(self, max_vals=20): Internal function, should always return unicode string """ - num = max_vals // 2 + if max_vals > 1: + num = max_vals // 2 + else: + num = 1 + max_vals = 2 head = self.iloc[:num]._get_repr(print_header=True, length=False, dtype=False, name=False) tail = self.iloc[-(max_vals - num):]._get_repr(print_header=False, diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 3404e4ddb7d44..d4202802b3d71 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -13,7 +13,7 @@ from numpy.random import randn import numpy as np -from pandas import DataFrame, Series, Index, _np_version_under1p7, Timestamp +from pandas import DataFrame, Series, Index, _np_version_under1p7, Timestamp, MultiIndex import pandas.core.format as fmt import pandas.util.testing as tm @@ -2057,6 +2057,42 @@ 
def test_mixed_datetime64(self): result = repr(df.ix[0]) self.assertTrue('2012-01-01' in result) + def test_max_multi_index_display(self): + # GH 7101 + + # doc example (indexing.rst) + + # multi-index + arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], + ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] + tuples = list(zip(*arrays)) + index = MultiIndex.from_tuples(tuples, names=['first', 'second']) + s = Series(randn(8), index=index) + + with option_context("display.max_rows", 10): + self.assertEquals(len(str(s).split('\n')),10) + with option_context("display.max_rows", 3): + self.assertEquals(len(str(s).split('\n')),5) + with option_context("display.max_rows", 2): + self.assertEquals(len(str(s).split('\n')),5) + with option_context("display.max_rows", 1): + self.assertEquals(len(str(s).split('\n')),5) + with option_context("display.max_rows", 0): + self.assertEquals(len(str(s).split('\n')),10) + + # index + s = Series(randn(8), None) + + with option_context("display.max_rows", 10): + self.assertEquals(len(str(s).split('\n')),9) + with option_context("display.max_rows", 3): + self.assertEquals(len(str(s).split('\n')),4) + with option_context("display.max_rows", 2): + self.assertEquals(len(str(s).split('\n')),4) + with option_context("display.max_rows", 1): + self.assertEquals(len(str(s).split('\n')),4) + with option_context("display.max_rows", 0): + self.assertEquals(len(str(s).split('\n')),9) class TestEngFormatter(tm.TestCase): _multiprocess_can_split_ = True
closes #7101
https://api.github.com/repos/pandas-dev/pandas/pulls/7114
2014-05-13T15:46:02Z
2014-05-13T16:27:19Z
2014-05-13T16:27:19Z
2014-06-30T14:19:44Z
ENH: add nlargest nsmallest to Series
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index ca9751569336c..5aa84f46debea 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1311,6 +1311,21 @@ Some other sorting notes / nuances: compatibility with NumPy methods which expect the ``ndarray.sort`` behavior. +.. versionadded:: 0.14.0 + +``Series`` has the ``nsmallest`` and ``nlargest`` methods which return the +smallest or largest :math:`n` values. For a large ``Series`` this can be much +faster than sorting the entire Series and calling ``head(n)`` on the result. + +.. ipython:: python + + s = Series(np.random.permutation(10)) + s + s.order() + s.nsmallest(3) + s.nlargest(3) + + Sorting by a multi-index column ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 8182bff92fb63..c033debbb6808 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -643,6 +643,7 @@ Enhancements values='Quantity', aggfunc=np.sum) - str.wrap implemented (:issue:`6999`) +- Add nsmallest and nlargest Series methods (:issue:`3960`) - `PeriodIndex` fully supports partial string indexing like `DatetimeIndex` (:issue:`7043`) diff --git a/pandas/algos.pyx b/pandas/algos.pyx index 3b527740505e4..431ef97debae6 100644 --- a/pandas/algos.pyx +++ b/pandas/algos.pyx @@ -21,6 +21,9 @@ from numpy cimport NPY_FLOAT16 as NPY_float16 from numpy cimport NPY_FLOAT32 as NPY_float32 from numpy cimport NPY_FLOAT64 as NPY_float64 +from numpy cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, + uint32_t, uint64_t, float16_t, float32_t, float64_t) + int8 = np.dtype(np.int8) int16 = np.dtype(np.int16) int32 = np.dtype(np.int32) @@ -736,16 +739,43 @@ def _check_minp(win, minp, N): # Physical description: 366 p. 
# Series: Prentice-Hall Series in Automatic Computation -def kth_smallest(ndarray[double_t] a, Py_ssize_t k): - cdef: - Py_ssize_t i,j,l,m,n - double_t x, t - n = len(a) +ctypedef fused numeric: + int8_t + int16_t + int32_t + int64_t + + uint8_t + uint16_t + uint32_t + uint64_t + + float32_t + float64_t + + +cdef inline Py_ssize_t swap(numeric *a, numeric *b) except -1: + cdef numeric t + + # cython doesn't allow pointer dereference so use array syntax + t = a[0] + a[0] = b[0] + b[0] = t + return 0 + + +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef numeric kth_smallest(numeric[:] a, Py_ssize_t k): + cdef: + Py_ssize_t i, j, l, m, n = a.size + numeric x l = 0 - m = n-1 - while (l<m): + m = n - 1 + + while l < m: x = a[k] i = l j = m @@ -754,9 +784,7 @@ def kth_smallest(ndarray[double_t] a, Py_ssize_t k): while a[i] < x: i += 1 while x < a[j]: j -= 1 if i <= j: - t = a[i] - a[i] = a[j] - a[j] = t + swap(&a[i], &a[j]) i += 1; j -= 1 if i > j: break @@ -765,6 +793,7 @@ def kth_smallest(ndarray[double_t] a, Py_ssize_t k): if k < i: m = j return a[k] + cdef inline kth_smallest_c(float64_t* a, Py_ssize_t k, Py_ssize_t n): cdef: Py_ssize_t i,j,l,m @@ -781,9 +810,7 @@ cdef inline kth_smallest_c(float64_t* a, Py_ssize_t k, Py_ssize_t n): while a[i] < x: i += 1 while x < a[j]: j -= 1 if i <= j: - t = a[i] - a[i] = a[j] - a[j] = t + swap(&a[i], &a[j]) i += 1; j -= 1 if i > j: break @@ -793,22 +820,22 @@ cdef inline kth_smallest_c(float64_t* a, Py_ssize_t k, Py_ssize_t n): return a[k] -def median(ndarray arr): +cpdef numeric median(numeric[:] arr): ''' A faster median ''' - cdef int n = len(arr) + cdef Py_ssize_t n = arr.size - if len(arr) == 0: + if n == 0: return np.NaN arr = arr.copy() if n % 2: - return kth_smallest(arr, n / 2) + return kth_smallest(arr, n // 2) else: - return (kth_smallest(arr, n / 2) + - kth_smallest(arr, n / 2 - 1)) / 2 + return (kth_smallest(arr, n // 2) + + kth_smallest(arr, n // 2 - 1)) / 2 # -------------- Min, Max subsequence @@ 
-2226,7 +2253,7 @@ cdef inline float64_t _median_linear(float64_t* a, int n): if n % 2: - result = kth_smallest_c(a, n / 2, n) + result = kth_smallest_c( a, n / 2, n) else: result = (kth_smallest_c(a, n / 2, n) + kth_smallest_c(a, n / 2 - 1, n)) / 2 diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 002d5480b9b7b..954f18ccb69b8 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -9,9 +9,7 @@ import pandas.core.common as com import pandas.algos as algos import pandas.hashtable as htable -import pandas.compat as compat -from pandas.compat import filter, string_types -from pandas.util.decorators import deprecate_kwarg +from pandas.compat import string_types def match(to_match, values, na_sentinel=-1): """ @@ -413,6 +411,90 @@ def group_position(*args): return result +_dtype_map = {'datetime64[ns]': 'int64', 'timedelta64[ns]': 'int64'} + + +def _finalize_nsmallest(arr, kth_val, n, take_last, narr): + ns, = np.nonzero(arr <= kth_val) + inds = ns[arr[ns].argsort(kind='mergesort')][:n] + + if take_last: + # reverse indices + return narr - 1 - inds + return inds + + +def nsmallest(arr, n, take_last=False): + ''' + Find the indices of the n smallest values of a numpy array. + + Note: Fails silently with NaN. + + ''' + if take_last: + arr = arr[::-1] + + narr = len(arr) + n = min(n, narr) + + sdtype = str(arr.dtype) + arr = arr.view(_dtype_map.get(sdtype, sdtype)) + + kth_val = algos.kth_smallest(arr.copy(), n - 1) + return _finalize_nsmallest(arr, kth_val, n, take_last, narr) + + +def nlargest(arr, n, take_last=False): + """ + Find the indices of the n largest values of a numpy array. + + Note: Fails silently with NaN. 
+ """ + sdtype = str(arr.dtype) + arr = arr.view(_dtype_map.get(sdtype, sdtype)) + return nsmallest(-arr, n, take_last=take_last) + + +def select_n_slow(dropped, n, take_last, method): + reverse_it = take_last or method == 'nlargest' + ascending = method == 'nsmallest' + slc = np.s_[::-1] if reverse_it else np.s_[:] + return dropped[slc].order(ascending=ascending).head(n) + + +_select_methods = {'nsmallest': nsmallest, 'nlargest': nlargest} + + +def select_n(series, n, take_last, method): + """Implement n largest/smallest. + + Parameters + ---------- + n : int + take_last : bool + method : str, {'nlargest', 'nsmallest'} + + Returns + ------- + nordered : Series + """ + dtype = series.dtype + if not issubclass(dtype.type, (np.integer, np.floating, np.datetime64, + np.timedelta64)): + raise TypeError("Cannot use method %r with dtype %s" % (method, dtype)) + + if n <= 0: + return series[[]] + + dropped = series.dropna() + + if n >= len(series): + return select_n_slow(dropped, n, take_last, method) + + inds = _select_methods[method](dropped.values, n, take_last) + return dropped.iloc[inds] + + _rank1d_functions = { 'float64': algos.rank_1d_float64, 'int64': algos.rank_1d_int64, diff --git a/pandas/core/series.py b/pandas/core/series.py index d95f8da8097e9..1637ba49d86a2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -6,7 +6,6 @@ # pylint: disable=E1101,E1103 # pylint: disable=W0703,W0622,W0613,W0201 -import operator import types import warnings @@ -15,21 +14,16 @@ import numpy.ma as ma from pandas.core.common import (isnull, notnull, _is_bool_indexer, - _default_index, _maybe_promote, _maybe_upcast, - _asarray_tuplesafe, is_integer_dtype, - _NS_DTYPE, _TD_DTYPE, - _infer_dtype_from_scalar, is_list_like, - _values_from_object, + _default_index, _maybe_upcast, + _asarray_tuplesafe, _infer_dtype_from_scalar, + is_list_like, _values_from_object, _possibly_cast_to_datetime, _possibly_castable, - _possibly_convert_platform, - _try_sort, + 
_possibly_convert_platform, _try_sort, ABCSparseArray, _maybe_match_name, _ensure_object, SettingWithCopyError) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, _ensure_index) -from pandas.core.indexing import ( - _check_bool_indexer, - _is_index_slice, _maybe_convert_indices) +from pandas.core.indexing import _check_bool_indexer, _maybe_convert_indices from pandas.core import generic, base from pandas.core.internals import SingleBlockManager from pandas.core.categorical import Categorical @@ -37,16 +31,17 @@ from pandas.tseries.period import PeriodIndex, Period from pandas import compat from pandas.util.terminal import get_terminal_size -from pandas.compat import zip, lzip, u, OrderedDict +from pandas.compat import zip, u, OrderedDict import pandas.core.array as pa import pandas.core.ops as ops +from pandas.core.algorithms import select_n import pandas.core.common as com import pandas.core.datetools as datetools import pandas.core.format as fmt import pandas.core.nanops as nanops -from pandas.util.decorators import Appender, Substitution, cache_readonly +from pandas.util.decorators import Appender, cache_readonly import pandas.lib as lib import pandas.tslib as tslib @@ -1728,6 +1723,72 @@ def _try_kind_sort(arr): else: return result.__finalize__(self) + def nlargest(self, n=5, take_last=False): + """Return the largest `n` elements. + + Parameters + ---------- + n : int + Return this many descending sorted values + take_last : bool + Where there are duplicate values, take the last duplicate + + Returns + ------- + top_n : Series + The n largest values in the Series, in sorted order + + Notes + ----- + Faster than ``.order(ascending=False).head(n)`` for small `n` relative + to the size of the ``Series`` object. 
+ + See Also + -------- + Series.nsmallest + + Examples + -------- + >>> import pandas as pd + >>> import numpy as np + >>> s = pd.Series(np.random.randn(1e6)) + >>> s.nlargest(10) # only sorts up to the N requested + """ + return select_n(self, n=n, take_last=take_last, method='nlargest') + + def nsmallest(self, n=5, take_last=False): + """Return the smallest `n` elements. + + Parameters + ---------- + n : int + Return this many ascending sorted values + take_last : bool + Where there are duplicate values, take the last duplicate + + Returns + ------- + bottom_n : Series + The n smallest values in the Series, in sorted order + + Notes + ----- + Faster than ``.order().head(n)`` for small `n` relative to + the size of the ``Series`` object. + + See Also + -------- + Series.nlargest + + Examples + -------- + >>> import pandas as pd + >>> import numpy as np + >>> s = pd.Series(np.random.randn(1e6)) + >>> s.nsmallest(10) # only sorts up to the N requested + """ + return select_n(self, n=n, take_last=take_last, method='nsmallest') + def sortlevel(self, level=0, ascending=True, sort_remaining=True): """ Sort Series with MultiIndex by chosen level. 
Data will be diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 6e7c9edfc4025..434c21bfa76de 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -3998,6 +3998,68 @@ def test_order(self): ordered = ts.order(ascending=False, na_position='first') assert_almost_equal(expected, ordered.valid().values) + def test_nsmallest_nlargest(self): + # float, int, datetime64 (use i8), timedelts64 (same), + # object that are numbers, object that are strings + + base = [3, 2, 1, 2, 5] + + s_list = [ + Series(base, dtype='int8'), + Series(base, dtype='int16'), + Series(base, dtype='int32'), + Series(base, dtype='int64'), + Series(base, dtype='float32'), + Series(base, dtype='float64'), + Series(base, dtype='uint8'), + Series(base, dtype='uint16'), + Series(base, dtype='uint32'), + Series(base, dtype='uint64'), + Series(base).astype('timedelta64[ns]'), + Series(pd.to_datetime(['2003', '2002', '2001', '2002', '2005'])), + ] + + raising = [ + Series([3., 2, 1, 2, '5'], dtype='object'), + Series([3., 2, 1, 2, 5], dtype='object'), + Series([3., 2, 1, 2, 5], dtype='complex256'), + Series([3., 2, 1, 2, 5], dtype='complex128'), + ] + + for r in raising: + dt = r.dtype + msg = "Cannot use method 'n(larg|small)est' with dtype %s" % dt + args = 2, len(r), 0, -1 + methods = r.nlargest, r.nsmallest + for method, arg in product(methods, args): + with tm.assertRaisesRegexp(TypeError, msg): + method(arg) + + for s in s_list: + + assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]]) + assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]]) + + assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]]) + assert_series_equal(s.nlargest(3, take_last=True), + s.iloc[[4, 0, 3]]) + + empty = s.iloc[0:0] + assert_series_equal(s.nsmallest(0), empty) + assert_series_equal(s.nsmallest(-1), empty) + assert_series_equal(s.nlargest(0), empty) + assert_series_equal(s.nlargest(-1), empty) + + assert_series_equal(s.nsmallest(len(s)), s.order()) + 
assert_series_equal(s.nsmallest(len(s) + 1), s.order()) + assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]]) + assert_series_equal(s.nlargest(len(s) + 1), + s.iloc[[4, 0, 1, 3, 2]]) + + s = Series([3., np.nan, 1, 2, 5]) + assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]]) + assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]]) + def test_rank(self): from pandas.compat.scipy import rankdata diff --git a/pandas/tools/util.py b/pandas/tools/util.py index 6dbefc4b70930..1d6ed3e11c81e 100644 --- a/pandas/tools/util.py +++ b/pandas/tools/util.py @@ -1,6 +1,9 @@ +import operator from pandas.compat import reduce from pandas.core.index import Index import numpy as np +from pandas import algos + def match(needles, haystack): haystack = Index(haystack) @@ -17,7 +20,7 @@ def cartesian_product(X): -------- >>> cartesian_product([list('ABC'), [1, 2]]) [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'), - array([1, 2, 1, 2, 1, 2])] + array([1, 2, 1, 2, 1, 2])] ''' diff --git a/vb_suite/series_methods.py b/vb_suite/series_methods.py new file mode 100644 index 0000000000000..1659340cfe050 --- /dev/null +++ b/vb_suite/series_methods.py @@ -0,0 +1,29 @@ +from vbench.api import Benchmark +from datetime import datetime + +common_setup = """from pandas_vb_common import * +""" + +setup = common_setup + """ +s1 = Series(np.random.randn(10000)) +s2 = Series(np.random.randint(1, 10, 10000)) +""" + +series_nlargest1 = Benchmark('s1.nlargest(3, take_last=True);' + 's1.nlargest(3, take_last=False)', + setup, + start_date=datetime(2014, 1, 25)) +series_nlargest2 = Benchmark('s2.nlargest(3, take_last=True);' + 's2.nlargest(3, take_last=False)', + setup, + start_date=datetime(2014, 1, 25)) + +series_nsmallest2 = Benchmark('s1.nsmallest(3, take_last=True);' + 's1.nsmallest(3, take_last=False)', + setup, + start_date=datetime(2014, 1, 25)) + +series_nsmallest2 = Benchmark('s2.nsmallest(3, take_last=True);' + 's2.nsmallest(3, take_last=False)', + setup, + 
start_date=datetime(2014, 1, 25))
closes #3960 xref: #5534
https://api.github.com/repos/pandas-dev/pandas/pulls/7113
2014-05-13T14:29:25Z
2014-05-14T21:21:48Z
2014-05-14T21:21:48Z
2014-07-22T18:13:46Z
BUG: tzinfo lost when concatenating multiindex arrays
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 5e1d237b2b559..a2ea345051afd 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -70,3 +70,4 @@ Bug Fixes - Bug in ``DataFrame`` and ``Series`` bar and barh plot raises ``TypeError`` when ``bottom`` and ``left`` keyword is specified (:issue:`7226`) - BUG in ``DataFrame.hist`` raises ``TypeError`` when it contains non numeric column (:issue:`7277`) +- Bug in ``MultiIndex.append``, ``concat`` and ``pivot_table`` don't preserve timezone (:issue:`6606`) diff --git a/pandas/core/index.py b/pandas/core/index.py index 02d6e983f5183..6eac395b1ecad 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2948,6 +2948,14 @@ def append(self, other): if not isinstance(other, (list, tuple)): other = [other] + if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other): + arrays = [] + for i in range(self.nlevels): + label = self.get_level_values(i) + appended = [o.get_level_values(i) for o in other] + arrays.append(label.append(appended)) + return MultiIndex.from_arrays(arrays, names=self.names) + to_concat = (self.values,) + tuple(k.values for k in other) new_tuples = np.concatenate(to_concat) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index d02e52715a735..07b753b6724d8 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1,4 +1,5 @@ # pylint: disable-msg=W0612,E1101,W0141 +import datetime import nose from numpy.random import randn @@ -70,6 +71,46 @@ def test_append(self): result = a['A'].append(b['A']) tm.assert_series_equal(result, self.frame['A']) + def test_append_index(self): + + idx1 = Index([1.1, 1.2, 1.3]) + idx2 = pd.date_range('2011-01-01', freq='D', periods=3, tz='Asia/Tokyo') + idx3 = Index(['A', 'B', 'C']) + + midx_lv2 = MultiIndex.from_arrays([idx1, idx2]) + midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3]) + + result = idx1.append(midx_lv2) + expected = Index([1.1, 1.2, 1.3, + 
(1.1, datetime.datetime(2010, 12, 31, 15, 0)), + (1.2, datetime.datetime(2011, 1, 1, 15, 0)), + (1.3, datetime.datetime(2011, 1, 2, 15, 0))]) + self.assert_(result.equals(expected)) + + result = midx_lv2.append(idx1) + expected = Index([(1.1, datetime.datetime(2010, 12, 31, 15, 0)), + (1.2, datetime.datetime(2011, 1, 1, 15, 0)), + (1.3, datetime.datetime(2011, 1, 2, 15, 0)), + 1.1, 1.2, 1.3]) + self.assert_(result.equals(expected)) + + result = midx_lv2.append(midx_lv2) + expected = MultiIndex.from_arrays([idx1.append(idx1), idx2.append(idx2)]) + self.assert_(result.equals(expected)) + + result = midx_lv2.append(midx_lv3) + self.assert_(result.equals(expected)) + + result = midx_lv3.append(midx_lv2) + expected = Index._simple_new( + np.array([(1.1, datetime.datetime(2010, 12, 31, 15, 0), 'A'), + (1.2, datetime.datetime(2011, 1, 1, 15, 0), 'B'), + (1.3, datetime.datetime(2011, 1, 2, 15, 0), 'C'), + (1.1, datetime.datetime(2010, 12, 31, 15, 0)), + (1.2, datetime.datetime(2011, 1, 1, 15, 0)), + (1.3, datetime.datetime(2011, 1, 2, 15, 0))]), None) + self.assert_(result.equals(expected)) + def test_dataframe_constructor(self): multi = DataFrame(np.random.randn(4, 4), index=[np.array(['a', 'a', 'b', 'b']), diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py index c4d11f0c15b39..f2239bba520e7 100644 --- a/pandas/tools/tests/test_merge.py +++ b/pandas/tools/tests/test_merge.py @@ -8,6 +8,7 @@ import numpy as np import random +import pandas as pd from pandas.compat import range, lrange, lzip, zip, StringIO from pandas import compat, _np_version_under1p7 from pandas.tseries.index import DatetimeIndex @@ -1497,6 +1498,26 @@ def test_concat_multiindex_with_keys(self): tm.assert_frame_equal(result.ix[1], frame) self.assertEqual(result.index.nlevels, 3) + def test_concat_multiindex_with_tz(self): + # GH 6606 + df = DataFrame({'dt': [datetime(2014, 1, 1), + datetime(2014, 1, 2), + datetime(2014, 1, 3)], + 'b': ['A', 'B', 'C'], + 'c': [1, 2, 3], 'd': 
[4, 5, 6]}) + df['dt'] = df['dt'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific')) + df = df.set_index(['dt', 'b']) + + exp_idx1 = pd.DatetimeIndex(['2014-01-01', '2014-01-02', '2014-01-03'] * 2, + tz='US/Pacific', name='dt') + exp_idx2 = Index(['A', 'B', 'C'] * 2, name='b') + exp_idx = pd.MultiIndex.from_arrays([exp_idx1, exp_idx2]) + expected = DataFrame({'c': [1, 2, 3] * 2, 'd': [4, 5, 6] * 2}, + index=exp_idx, columns=['c', 'd']) + + result = concat([df, df]) + tm.assert_frame_equal(result, expected) + def test_concat_keys_and_levels(self): df = DataFrame(np.random.randn(1, 3)) df2 = DataFrame(np.random.randn(1, 4)) diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py index 23320e5b4e3a1..e2f275f8a39d1 100644 --- a/pandas/tools/tests/test_pivot.py +++ b/pandas/tools/tests/test_pivot.py @@ -490,6 +490,41 @@ def test_pivot_timegrouper(self): values='Quantity', aggfunc=np.sum) tm.assert_frame_equal(result, expected.T) + def test_pivot_datetime_tz(self): + dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00', + '2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00'] + dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00', '2013-01-01 15:00:00', + '2013-02-01 15:00:00', '2013-02-01 15:00:00', '2013-02-01 15:00:00'] + df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'], + 'dt1': dates1, 'dt2': dates2, + 'value1': range(6), 'value2': [1, 2] * 3}) + df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific')) + df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo')) + + exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00', + '2011-07-19 09:00:00'], tz='US/Pacific', name='dt1') + exp_col1 = Index(['value1', 'value1']) + exp_col2 = Index(['a', 'b'], name='label') + exp_col = MultiIndex.from_arrays([exp_col1, exp_col2]) + expected = DataFrame([[0, 3], [1, 4], [2, 5]], + index=exp_idx, columns=exp_col) + result = pivot_table(df, index=['dt1'], 
columns=['label'], values=['value1']) + tm.assert_frame_equal(result, expected) + + + exp_col1 = Index(['sum', 'sum', 'sum', 'sum', 'mean', 'mean', 'mean', 'mean']) + exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2) + exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00', '2013-02-01 15:00:00'] * 4, + tz='Asia/Tokyo', name='dt2') + exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3]) + expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2], [1, 4, 2, 1, 1, 4, 2, 1], + [2, 5, 1, 2, 2, 5, 1, 2]]), index=exp_idx, columns=exp_col) + + result = pivot_table(df, index=['dt1'], columns=['dt2'], values=['value1', 'value2'], + aggfunc=[np.sum, np.mean]) + tm.assert_frame_equal(result, expected) + + class TestCrosstab(tm.TestCase): def setUp(self):
Closes #6606. The problem is caused by `MultiIndex.append`. Based on current master, `MultiIndex.append` works as below. The fix covers case1 and case2 which the result will be `MultiIndex`. The fix is applied to `concat`, and also `pivot_table` work as expected. ``` import pandas as pd idx1 = pd.Index([1.1, 1.2, 1.3]) idx2 = pd.date_range('2011-01-01', freq='D', periods=3, tz='Asia/Tokyo') idx3 = pd.Index(['A', 'B', 'C']) midx_lv2 = pd.MultiIndex.from_arrays([idx1, idx2]) midx_lv3 = pd.MultiIndex.from_arrays([idx1, idx2, idx3]) #1 results in MultiIndex, which nlevels is 2 midx_lv2.append(midx_lv2) #2 results in MultiIndex, which nlevels is 2, not 3. 3rd line will be disappeared. midx_lv2.append(midx_lv3) #3 results in tupled Index. midx_lv2.append(idx1) #4 results in tupled Index. result = midx_lv3.append(midx_lv2) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7112
2014-05-13T14:25:54Z
2014-06-04T00:12:40Z
2014-06-04T00:12:39Z
2014-06-14T22:31:57Z
API: allow option truncate for display.show_dimensions to only show dimensions if truncated (GH6457)
diff --git a/doc/source/release.rst b/doc/source/release.rst index c2cf938f6f806..b8e5b31bd873c 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -346,6 +346,8 @@ Improvements to existing features - Regression in the display of a MultiIndexed Series with ``display.max_rows`` is less than the length of the series (:issue:`7101`) - :meth:`~DataFrame.describe` now accepts an array of percentiles to include in the summary statistics (:issue:`4196`) +- allow option ``'truncate'`` for ``display.show_dimensions`` to only show the dimensions if the + frame is truncated (:issue:`6547`) .. _release.bug_fixes-0.14.0: diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 9f050633a3b0d..4c099c627e6e5 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -204,6 +204,26 @@ API changes Display Changes ~~~~~~~~~~~~~~~ +- allow option ``'truncate'`` for ``display.show_dimensions`` to only show the dimensions if the + frame is truncated (:issue:`6547`). + + The default for ``display.show_dimensions`` will now be **truncate**! This is consistent with + how Series display length. + + .. 
ipython:: python + + dfd = pd.DataFrame(np.arange(25).reshape(-1,5), index=[0,1,2,3,4], columns=[0,1,2,3,4]) + + # show dimensions only if truncated + with pd.option_context('display.max_rows', 2, 'display.max_columns', 2, + 'display.show_dimensions', 'truncate'): + print(dfd) + + # show dimensions only if truncated + with pd.option_context('display.max_rows', 10, 'display.max_columns', 40, + 'display.show_dimensions', 'truncate'): + print(dfd) + - Regression in the display of a MultiIndexed Series with ``display.max_rows`` is less than the length of the series (:issue:`7101`) - Fixed a bug in the HTML repr of a truncated Series or DataFrame not showing the class name with the diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 9533c0921e1e3..48665bc98ab6c 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -120,8 +120,10 @@ """ pc_show_dimensions_doc = """ -: boolean +: boolean or 'truncate' Whether to print out dimensions at the end of DataFrame repr. + If 'truncate' is specified, only print out the dimensions if the + frame is truncated (e.g. 
not display all rows and/or columns) """ pc_line_width_doc = """ @@ -247,7 +249,8 @@ def mpl_style_cb(key): cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc, validator=is_text) cf.register_option('expand_frame_repr', True, pc_expand_repr_doc) - cf.register_option('show_dimensions', True, pc_show_dimensions_doc) + cf.register_option('show_dimensions', True, pc_show_dimensions_doc, + validator=is_one_of_factory([True, False, 'truncate'])) cf.register_option('chop_threshold', None, pc_chop_threshold_doc) cf.register_option('max_seq_items', 100, pc_max_seq_items) cf.register_option('mpl_style', None, pc_mpl_style_doc, diff --git a/pandas/core/format.py b/pandas/core/format.py index 0a4f65b6bf0e6..49e98fe9911c5 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -236,6 +236,12 @@ def _strlen(x): class TableFormatter(object): + is_truncated = False + show_dimensions = None + + @property + def should_show_dimensions(self): + return self.show_dimensions is True or (self.show_dimensions == 'truncate' and self.is_truncated) def _get_formatter(self, i): if isinstance(self.formatters, (list, tuple)): @@ -315,9 +321,9 @@ def _to_str_columns(self): _strlen = _strlen_func() cols_to_show = self.columns[:self.max_cols] - truncate_h = self.max_cols and (len(self.columns) > self.max_cols) - truncate_v = self.max_rows and (len(self.frame) > self.max_rows) - self.truncated_v = truncate_v + self.truncated_h = truncate_h = self.max_cols and (len(self.columns) > self.max_cols) + self.truncated_v = truncate_v = self.max_rows and (len(self.frame) > self.max_rows) + self.is_truncated = self.truncated_h or self.truncated_v if truncate_h: cols_to_show = self.columns[:self.max_cols] else: @@ -380,7 +386,7 @@ def to_string(self): self.buf.writelines(text) - if self.show_dimensions: + if self.should_show_dimensions: self.buf.write("\n\n[%d rows x %d columns]" % (len(frame), len(frame.columns))) @@ -634,6 +640,8 @@ def __init__(self, formatter, classes=None, 
max_rows=None, max_cols=None): self.max_rows = max_rows or len(self.fmt.frame) self.max_cols = max_cols or len(self.fmt.columns) + self.show_dimensions = self.fmt.show_dimensions + self.is_truncated = self.max_rows < len(self.fmt.frame) or self.max_cols < len(self.fmt.columns) def write(self, s, indent=0): rs = com.pprint_thing(s) @@ -709,7 +717,7 @@ def write_result(self, buf): indent = self._write_body(indent) self.write('</table>', indent) - if self.fmt.show_dimensions: + if self.should_show_dimensions: by = chr(215) if compat.PY3 else unichr(215) # × self.write(u('<p>%d rows %s %d columns</p>') % (len(frame), by, len(frame.columns))) diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 6e3893e5c0850..75bd03a71466e 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -1197,6 +1197,26 @@ def test_to_string_line_width(self): s = df.to_string(line_width=80) self.assertEqual(max(len(l) for l in s.split('\n')), 80) + def test_show_dimensions(self): + df = pd.DataFrame(123, lrange(10, 15), lrange(30)) + + with option_context('display.max_rows', 10, 'display.max_columns', 40, 'display.width', + 500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', True): + self.assertTrue('5 rows' in str(df)) + self.assertTrue('5 rows' in df._repr_html_()) + with option_context('display.max_rows', 10, 'display.max_columns', 40, 'display.width', + 500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', False): + self.assertFalse('5 rows' in str(df)) + self.assertFalse('5 rows' in df._repr_html_()) + with option_context('display.max_rows', 2, 'display.max_columns', 2, 'display.width', + 500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', 'truncate'): + self.assertTrue('5 rows' in str(df)) + self.assertTrue('5 rows' in df._repr_html_()) + with option_context('display.max_rows', 10, 'display.max_columns', 40, 'display.width', + 500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', 
'truncate'): + self.assertFalse('5 rows' in str(df)) + self.assertFalse('5 rows' in df._repr_html_()) + def test_to_html(self): # big mixed biggie = DataFrame({'A': randn(200),
closes #6547 add the option 'trunctate'. Should this be the new default????? ``` dfd = pd.DataFrame(np.arange(25).reshape(-1,5), index=[0,1,2,3,4], columns=[0,1,2,3,4]) # show dimensions only if truncated with pd.option_context('display.max_rows', 2, 'display.max_columns', 2, 'display.show_dimensions', 'truncate'): print(dfd) ``` ``` 0 1 0 0 1 ... 1 5 6 ... .. .. [5 rows x 5 columns] ``` ``` # show dimensions only if truncated with pd.option_context('display.max_rows', 10, 'display.max_columns', 40, 'display.show_dimensions', 'truncate'): print(dfd) ``` ``` 0 1 2 3 4 0 0 1 2 3 4 1 5 6 7 8 9 2 10 11 12 13 14 3 15 16 17 18 19 4 20 21 22 23 24 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7108
2014-05-13T13:20:41Z
2014-05-14T00:47:58Z
2014-05-14T00:47:57Z
2014-06-13T10:12:07Z
BUG: remove unique requirement rom MultiIndex.get_locs (GH7106)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 429e4ea8169d1..190cf64884b44 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -295,7 +295,7 @@ Improvements to existing features the func (:issue:`6289`) - ``plot(legend='reverse')`` will now reverse the order of legend labels for most plot kinds. (:issue:`6014`) -- Allow multi-index slicers (:issue:`6134`, :issue:`4036`, :issue:`3057`, :issue:`2598`, :issue:`5641`) +- Allow multi-index slicers (:issue:`6134`, :issue:`4036`, :issue:`3057`, :issue:`2598`, :issue:`5641`, :issue:`7106`) - improve performance of slice indexing on Series with string keys (:issue:`6341`, :issue:`6372`) - implement joining a single-level indexed DataFrame on a matching column of a multi-indexed DataFrame (:issue:`3662`) - Performance improvement in indexing into a multi-indexed Series (:issue:`5567`) diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 4f3f7976e8007..49551c5bd3550 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -290,7 +290,7 @@ You can use ``slice(None)`` to select all the contents of *that* level. You do n As usual, **both sides** of the slicers are included as this is label indexing. See :ref:`the docs<indexing.mi_slicers>` -See also issues (:issue:`6134`, :issue:`4036`, :issue:`3057`, :issue:`2598`, :issue:`5641`) +See also issues (:issue:`6134`, :issue:`4036`, :issue:`3057`, :issue:`2598`, :issue:`5641`, :issue:`7106`) .. 
warning:: diff --git a/pandas/core/index.py b/pandas/core/index.py index 7a2a160e0dc33..3cbefbf141491 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2595,7 +2595,7 @@ def get_level_values(self, level): unique = self.levels[num] # .values labels = self.labels[num] filled = com.take_1d(unique.values, labels, fill_value=unique._na_value) - values = unique._simple_new(filled, self.names[num], + values = unique._simple_new(filled, self.names[num], freq=getattr(unique, 'freq', None), tz=getattr(unique, 'tz', None)) return values @@ -3556,8 +3556,6 @@ def get_locs(self, tup): if not self.is_lexsorted_for_tuple(tup): raise KeyError('MultiIndex Slicing requires the index to be fully lexsorted' ' tuple len ({0}), lexsort depth ({1})'.format(len(tup), self.lexsort_depth)) - if not self.is_unique: - raise ValueError('MultiIndex Slicing requires a unique index') def _convert_indexer(r): if isinstance(r, slice): diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 27fc8cee738c9..d7ab08d38eac7 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1356,6 +1356,47 @@ def f(): df.loc[(slice(None),[1])] self.assertRaises(KeyError, f) + # not lexsorted + self.assertEquals(df.index.lexsort_depth,2) + df = df.sortlevel(level=1,axis=0) + self.assertEquals(df.index.lexsort_depth,0) + with tm.assertRaisesRegexp(KeyError, 'MultiIndex Slicing requires the index to be fully lexsorted tuple len \(2\), lexsort depth \(0\)'): + df.loc[(slice(None),df.loc[:,('a','bar')]>5),:] + + def test_multiindex_slicers_non_unique(self): + + # GH 7106 + # non-unique mi index support + df = DataFrame(dict(A = ['foo','foo','foo','foo'], + B = ['a','a','a','a'], + C = [1,2,1,3], + D = [1,2,3,4])).set_index(['A','B','C']).sortlevel() + self.assertFalse(df.index.is_unique) + expected = DataFrame(dict(A = ['foo','foo'], + B = ['a','a'], + C = [1,1], + D = [1,3])).set_index(['A','B','C']).sortlevel() + result = 
df.loc[(slice(None),slice(None),1),:] + assert_frame_equal(result, expected) + + # this is equivalent of an xs expression + result = df.xs(1,level=2,drop_level=False) + assert_frame_equal(result, expected) + + df = DataFrame(dict(A = ['foo','foo','foo','foo'], + B = ['a','a','a','a'], + C = [1,2,1,2], + D = [1,2,3,4])).set_index(['A','B','C']).sortlevel() + self.assertFalse(df.index.is_unique) + expected = DataFrame(dict(A = ['foo','foo'], + B = ['a','a'], + C = [1,1], + D = [1,3])).set_index(['A','B','C']).sortlevel() + result = df.loc[(slice(None),slice(None),1),:] + self.assertFalse(result.index.is_unique) + assert_frame_equal(result, expected) + + def test_per_axis_per_level_doc_examples(self): # test index maker
enables slicing of non-unique multi-indexes with slicers, closes #7106
https://api.github.com/repos/pandas-dev/pandas/pulls/7107
2014-05-13T12:20:40Z
2014-05-13T12:54:42Z
2014-05-13T12:54:42Z
2014-06-30T19:48:12Z
TYPO: idxmax is maximum, not minimum
diff --git a/pandas/core/series.py b/pandas/core/series.py index fc9b9ad936351..23d267601b3a2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1179,7 +1179,7 @@ def idxmax(self, axis=None, out=None, skipna=True): Returns ------- - idxmax : Index of minimum of values + idxmax : Index of maximum of values Notes -----
Confusing doc typo was momentarily confusing.
https://api.github.com/repos/pandas-dev/pandas/pulls/7102
2014-05-12T15:35:31Z
2014-05-12T15:42:55Z
2014-05-12T15:42:55Z
2014-07-16T09:05:40Z
TST: sql add more tests (NaN handling, ..)
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 3299dd3d70c92..bf9b38d4a969d 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -486,8 +486,8 @@ def test_timedelta(self): with tm.assert_produces_warning(UserWarning): df.to_sql('test_timedelta', self.conn) result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn) - tm.assert_series_equal(result['foo'], df['foo'].astype('int64')) - + tm.assert_series_equal(result['foo'], df['foo'].astype('int64')) + def test_to_sql_index_label(self): temp_frame = DataFrame({'col1': range(4)}) @@ -893,6 +893,34 @@ def test_datetime(self): else: tm.assert_frame_equal(result, df) + def test_datetime_NaT(self): + # status: + # - postgresql: gives error on inserting "0001-255-255T00:00:00" + # - sqlite3: works, but reading it with query returns '-001--1--1 -1:-1:-1.-00001' + + if self.driver == 'pymysql': + raise nose.SkipTest('writing datetime not working with pymysql') + if self.driver == 'psycopg2': + raise nose.SkipTest('writing datetime NaT not working with psycopg2') + + df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3), + 'B': np.arange(3.0)}) + df.loc[1, 'A'] = np.nan + df.to_sql('test_datetime', self.conn, index=False) + + # with read_table -> type information from schema used + result = sql.read_sql_table('test_datetime', self.conn) + tm.assert_frame_equal(result, df) + + # with read_sql -> no type information -> sqlite has no native + result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn) + if self.flavor == 'sqlite': + self.assertTrue(isinstance(result.loc[0, 'A'], string_types)) + result['A'] = to_datetime(result['A'], coerce=True) + tm.assert_frame_equal(result, df) + else: + tm.assert_frame_equal(result, df) + def test_mixed_dtype_insert(self): # see GH6509 s1 = Series(2**25 + 1,dtype=np.int32) @@ -905,6 +933,63 @@ def test_mixed_dtype_insert(self): tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True) + def 
test_nan_numeric(self): + if self.driver == 'pymysql': + raise nose.SkipTest('writing NaNs not working with pymysql') + + # NaNs in numeric float column + df = DataFrame({'A':[0, 1, 2], 'B':[0.2, np.nan, 5.6]}) + df.to_sql('test_nan', self.conn, index=False) + + # with read_table + result = sql.read_sql_table('test_nan', self.conn) + tm.assert_frame_equal(result, df) + + # with read_sql + result = sql.read_sql_query('SELECT * FROM test_nan', self.conn) + tm.assert_frame_equal(result, df) + + def test_nan_fullcolumn(self): + if self.driver == 'pymysql': + raise nose.SkipTest('writing NaNs not working with pymysql') + + # full NaN column (numeric float column) + df = DataFrame({'A':[0, 1, 2], 'B':[np.nan, np.nan, np.nan]}) + df.to_sql('test_nan', self.conn, index=False) + + if self.flavor == 'sqlite': + df['B'] = df['B'].astype('object') + df['B'] = None + + # with read_table + result = sql.read_sql_table('test_nan', self.conn) + tm.assert_frame_equal(result, df) + + # with read_sql + result = sql.read_sql_query('SELECT * FROM test_nan', self.conn) + tm.assert_frame_equal(result, df) + + def test_nan_string(self): + if self.driver == 'pymysql': + raise nose.SkipTest('writing NaNs not working with pymysql') + + # NaNs in string column + df = DataFrame({'A':[0, 1, 2], 'B':['a', 'b', np.nan]}) + df.to_sql('test_nan', self.conn, index=False) + + if self.flavor == 'sqlite': + df.loc[2, 'B'] = None + elif self.flavor == 'postgresql': + df = df.fillna('NaN') + + # with read_table + result = sql.read_sql_table('test_nan', self.conn) + tm.assert_frame_equal(result, df) + + # with read_sql + result = sql.read_sql_query('SELECT * FROM test_nan', self.conn) + tm.assert_frame_equal(result, df) + class TestSQLiteAlchemy(_TestSQLAlchemy): """
This PR just adds some tests, not fixes to get them all running. Summary: - NaN writing not working for MySQL (or pymysql), but does work for sqlite and postgresql - NaN reading does work for float columns. But not for string columns (converted to None or u'NaN') or for sqlite full float columns - NaT in datetime columns only works for sqlite for writing. Writing postgresql gives an error on inserting the date `0001-255-255T00:00:00`.
https://api.github.com/repos/pandas-dev/pandas/pulls/7100
2014-05-11T17:03:28Z
2014-05-13T10:29:47Z
2014-05-13T10:29:47Z
2014-07-08T03:52:05Z
BUG: GroupBy doesn't preserve timezone
diff --git a/doc/source/release.rst b/doc/source/release.rst index 429e4ea8169d1..e9fd0f1d5fd48 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -497,6 +497,7 @@ Bug Fixes - Bug in ``quantile`` with datetime values (:issue:`6965`) - Bug in ``Dataframe.set_index``, ``reindex`` and ``pivot`` don't preserve ``DatetimeIndex`` and ``PeriodIndex`` attributes (:issue:`3950`, :issue:`5878`, :issue:`6631`) - Bug in ``MultiIndex.get_level_values`` doesn't preserve ``DatetimeIndex`` and ``PeriodIndex`` attributes (:issue:`7092`) +- Bug in ``Groupby`` doesn't preserve ``tz`` (:issue:`3950`) pandas 0.13.1 ------------- diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index f0ecce0235b49..002d5480b9b7b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -112,7 +112,8 @@ def factorize(values, sort=False, order=None, na_sentinel=-1): Returns ------- labels : the indexer to the original array - uniques : the unique values + uniques : ndarray (1-d) or Index + the unique values. Index is returned when passed values is Index or Series note: an array of Periods will ignore sort as it returns an always sorted PeriodIndex """ @@ -120,7 +121,8 @@ def factorize(values, sort=False, order=None, na_sentinel=-1): warn("order is deprecated." 
"See https://github.com/pydata/pandas/issues/6926", FutureWarning) - from pandas.tseries.period import PeriodIndex + from pandas.core.index import Index + from pandas.core.series import Series vals = np.asarray(values) is_datetime = com.is_datetime64_dtype(vals) (hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables) @@ -159,9 +161,11 @@ def factorize(values, sort=False, order=None, na_sentinel=-1): if is_datetime: uniques = uniques.astype('M8[ns]') - if isinstance(values, PeriodIndex): - uniques = PeriodIndex(ordinal=uniques, freq=values.freq) - + if isinstance(values, Index): + uniques = values._simple_new(uniques, None, freq=getattr(values, 'freq', None), + tz=getattr(values, 'tz', None)) + elif isinstance(values, Series): + uniques = Index(uniques) return labels, uniques diff --git a/pandas/core/base.py b/pandas/core/base.py index f614516c87d50..5605e1b0bb7ce 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -336,10 +336,7 @@ def factorize(self, sort=False, na_sentinel=-1): uniques : the unique Index """ from pandas.core.algorithms import factorize - from pandas.core.index import Index - labels, uniques = factorize(self, sort=sort, na_sentinel=na_sentinel) - uniques = Index(uniques) - return labels, uniques + return factorize(self, sort=sort, na_sentinel=na_sentinel) date = _field_accessor('date','Returns numpy array of datetime.date. The date part of the Timestamps') time = _field_accessor('time','Returns numpy array of datetime.time. 
The time part of the Timestamps') diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index ee6f8f1847258..dfadd34e2d205 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -80,16 +80,10 @@ def __init__(self, labels, levels=None, name=None): if levels is None: if name is None: name = getattr(labels, 'name', None) - if hasattr(labels, 'factorize'): - try: - labels, levels = labels.factorize(sort=True) - except TypeError: - labels, levels = labels.factorize(sort=False) - else: - try: - labels, levels = factorize(labels, sort=True) - except TypeError: - labels, levels = factorize(labels, sort=False) + try: + labels, levels = factorize(labels, sort=True) + except TypeError: + labels, levels = factorize(labels, sort=False) self.labels = labels self.levels = levels diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index bce3a993171a7..258005c8a08a9 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1875,9 +1875,9 @@ def _make_labels(self): if self._was_factor: # pragma: no cover raise Exception('Should not call this method grouping by level') else: - labs, uniques = algos.factorize(self.grouper, sort=self.sort) + labels, uniques = algos.factorize(self.grouper, sort=self.sort) uniques = Index(uniques, name=self.name) - self._labels = labs + self._labels = labels self._group_index = uniques _groups = None diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 07bf247e5aafe..7081ba50e481f 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -116,11 +116,11 @@ def test_datelike(self): # periods are not 'sorted' as they are converted back into an index labels, uniques = algos.factorize(x) self.assert_numpy_array_equal(labels, np.array([ 0,0,0,1,1,0],dtype=np.int64)) - self.assert_numpy_array_equal(uniques, np.array([v1, v2],dtype=object)) + self.assert_numpy_array_equal(uniques, pd.PeriodIndex([v1, v2])) labels, uniques = algos.factorize(x,sort=True) 
self.assert_numpy_array_equal(labels, np.array([ 0,0,0,1,1,0],dtype=np.int64)) - self.assert_numpy_array_equal(uniques, np.array([v1, v2],dtype=object)) + self.assert_numpy_array_equal(uniques, pd.PeriodIndex([v1, v2])) class TestUnique(tm.TestCase): _multiprocess_can_split_ = True diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 7a8fc8a3832db..fbeb947f4bbdc 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2900,6 +2900,38 @@ def test_groupby_groups_datetimeindex(self): groups = grouped.groups tm.assert_isinstance(list(groups.keys())[0], datetime) + def test_groupby_groups_datetimeindex_tz(self): + # GH 3950 + dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00', + '2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00'] + df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'], + 'datetime': dates, 'value1': range(6), 'value2': [1, 2] * 3}) + df['datetime'] = df['datetime'].apply(lambda d: Timestamp(d, tz='US/Pacific')) + + exp_idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 07:00:00', + '2011-07-19 08:00:00', '2011-07-19 08:00:00', + '2011-07-19 09:00:00', '2011-07-19 09:00:00'], + tz='US/Pacific', name='datetime') + exp_idx2 = Index(['a', 'b'] * 3, name='label') + exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2]) + expected = DataFrame({'value1': [0, 3, 1, 4, 2, 5], 'value2': [1, 2, 2, 1, 1, 2]}, + index=exp_idx, columns=['value1', 'value2']) + + result = df.groupby(['datetime', 'label']).sum() + assert_frame_equal(result, expected) + + # by level + didx = pd.DatetimeIndex(dates, tz='Asia/Tokyo') + df = DataFrame({'value1': range(6), 'value2': [1, 2, 3, 1, 2, 3]}, index=didx) + + exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00', + '2011-07-19 09:00:00'], tz='Asia/Tokyo') + expected = DataFrame({'value1': [3, 5, 7], 'value2': [2, 4, 6]}, + index=exp_idx, columns=['value1', 'value2']) + + result = df.groupby(level=0).sum() + 
assert_frame_equal(result, expected) + def test_groupby_reindex_inside_function(self): from pandas.tseries.api import DatetimeIndex diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index b318e18fd6481..d9018ad92eb17 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -806,19 +806,6 @@ def to_period(self, freq=None): return PeriodIndex(self.values, freq=freq, tz=self.tz) - def factorize(self, sort=False, na_sentinel=-1): - """ - Index.factorize with handling for DatetimeIndex metadata - - Returns - ------- - result : DatetimeIndex - """ - from pandas.core.algorithms import factorize - labels, uniques = factorize(self.asi8, sort=sort, na_sentinel=na_sentinel) - uniques = DatetimeIndex._simple_new(uniques, name=self.name, freq=self.freq, tz=self.tz) - return labels, uniques - def order(self, return_indexer=False, ascending=True): """ Return sorted copy of Index
Closes #3950. This is a fix for `groupby` issue. (#7092 is for original report)
https://api.github.com/repos/pandas-dev/pandas/pulls/7099
2014-05-11T15:03:14Z
2014-05-13T12:13:26Z
2014-05-13T12:13:25Z
2014-06-13T19:00:49Z
BUG: DatetimeIndex with freq raises ValueError when passed value is short
diff --git a/doc/source/release.rst b/doc/source/release.rst index b12f4eca010d9..11684c32be5cc 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -499,6 +499,7 @@ Bug Fixes - Bug in ``MultiIndex.get_level_values`` doesn't preserve ``DatetimeIndex`` and ``PeriodIndex`` attributes (:issue:`7092`) - Bug in ``Groupby`` doesn't preserve ``tz`` (:issue:`3950`) - Bug in ``PeriodIndex`` partial string slicing (:issue:`6716`) +- Bug in ``DatetimeIndex`` specifying ``freq`` raises ``ValueError`` when passed value is too short pandas 0.13.1 ------------- diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index d9018ad92eb17..61285528a9b36 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -299,8 +299,10 @@ def __new__(cls, data=None, if freq is not None and not freq_infer: inferred = subarr.inferred_freq if inferred != freq.freqstr: - raise ValueError('Dates do not conform to passed ' - 'frequency') + on_freq = cls._generate(subarr[0], None, len(subarr), None, freq, tz=tz) + if not np.array_equal(subarr.asi8, on_freq.asi8): + raise ValueError('Inferred frequency {0} from passed dates does not' + 'conform to passed frequency {1}'.format(inferred, freq.freqstr)) if freq_infer: inferred = subarr.inferred_freq diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 419ab48a01a07..e69f50165c639 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -2314,6 +2314,11 @@ def test_factorize(self): self.assert_numpy_array_equal(arr, exp_arr) self.assert_(idx.equals(exp_idx)) + def test_recreate_from_data(self): + for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']: + org = PeriodIndex(start='2001/04/01', freq=o, periods=1) + idx = PeriodIndex(org.values, freq=o) + self.assert_(idx.equals(org)) def _permute(obj): return obj.take(np.random.permutation(len(obj))) diff --git a/pandas/tseries/tests/test_timeseries.py 
b/pandas/tseries/tests/test_timeseries.py index 79fd7cc6421e2..16a251c681f1a 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -285,6 +285,28 @@ def test_indexing(self): # this is a single date, so will raise self.assertRaises(KeyError, df.__getitem__, df.index[2],) + def test_recreate_from_data(self): + if _np_version_under1p7: + freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H'] + else: + freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C'] + + for f in freqs: + org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1) + idx = DatetimeIndex(org, freq=f) + self.assert_(idx.equals(org)) + + # unbale to create tz-aware 'A' and 'C' freq + if _np_version_under1p7: + freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H'] + else: + freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N'] + + for f in freqs: + org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1) + idx = DatetimeIndex(org, freq=f, tz='US/Pacific') + self.assert_(idx.equals(org)) + def assert_range_equal(left, right): assert(left.equals(right))
When `DatetimeIndex` is created by passing `freq` and `check_integrity=True`, passed `freq` and `inferred_freq` must be identical. But this comparison can fail if length of passed data doesn't have enough length to infer freq. Added additional logic to determine whether passed values are on the passed `freq`.
https://api.github.com/repos/pandas-dev/pandas/pulls/7098
2014-05-11T14:01:33Z
2014-05-14T13:08:10Z
2014-05-14T13:08:10Z
2014-06-20T17:01:29Z
REGR: Regression in groupby.nth() for out-of-bounds indexers (GH6621)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6255281a451ed..b670e6b5cea05 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -490,6 +490,7 @@ Bug Fixes - Bug in ``MultiIndex.from_arrays`` created from ``DatetimeIndex`` doesn't preserve ``freq`` and ``tz`` (:issue:`7090`) - Bug in ``unstack`` raises ``ValueError`` when ``MultiIndex`` contains ``PeriodIndex`` (:issue:`4342`) - Bug in ``boxplot`` and ``hist`` draws unnecessary axes (:issue:`6769`) +- Regression in ``groupby.nth()`` for out-of-bounds indexers (:issue:`6621`) pandas 0.13.1 ------------- diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index a1bcab159cefa..1bd9f27dc926a 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -891,7 +891,7 @@ def _reindex(keys, level=None): return self.obj.take(indexer, axis=axis) # this is not the most robust, but... - if (isinstance(labels, MultiIndex) and + if (isinstance(labels, MultiIndex) and len(keyarr) and not isinstance(keyarr[0], tuple)): level = 0 else: diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 8b957484f0c0d..e5d8b92f7094f 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -241,6 +241,20 @@ def test_nth(self): assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]]) assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]]) + # out of bounds, regression from 0.13.1 + # GH 6621 + df = DataFrame({'color': {0: 'green', 1: 'green', 2: 'red', 3: 'red', 4: 'red'}, + 'food': {0: 'ham', 1: 'eggs', 2: 'eggs', 3: 'ham', 4: 'pork'}, + 'two': {0: 1.5456590000000001, 1: -0.070345000000000005, 2: -2.4004539999999999, 3: 0.46206000000000003, 4: 0.52350799999999997}, + 'one': {0: 0.56573799999999996, 1: -0.9742360000000001, 2: 1.033801, 3: -0.78543499999999999, 4: 0.70422799999999997}}).set_index(['color', 'food']) + + result = df.groupby(level=0).nth(2) + expected = df.iloc[[-1]] + assert_frame_equal(result,expected) + + result = 
df.groupby(level=0).nth(3) + expected = df.loc[[]] + assert_frame_equal(result,expected) def test_grouper_index_types(self): # related GH5375 diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index e36fdffc8cc31..27fc8cee738c9 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -3253,6 +3253,7 @@ def test_iloc_empty_list_indexer_is_ok(self): df = mkdf(5, 2) assert_frame_equal(df.iloc[:,[]], df.iloc[:, :0]) # vertical empty assert_frame_equal(df.iloc[[],:], df.iloc[:0, :]) # horizontal empty + assert_frame_equal(df.iloc[[]], df.iloc[:0, :]) # horizontal empty # FIXME: fix loc & xs def test_loc_empty_list_indexer_is_ok(self): @@ -3261,6 +3262,7 @@ def test_loc_empty_list_indexer_is_ok(self): df = mkdf(5, 2) assert_frame_equal(df.loc[:,[]], df.iloc[:, :0]) # vertical empty assert_frame_equal(df.loc[[],:], df.iloc[:0, :]) # horizontal empty + assert_frame_equal(df.loc[[]], df.iloc[:0, :]) # horizontal empty def test_ix_empty_list_indexer_is_ok(self): raise nose.SkipTest('ix discards columns names') @@ -3268,6 +3270,7 @@ def test_ix_empty_list_indexer_is_ok(self): df = mkdf(5, 2) assert_frame_equal(df.ix[:,[]], df.iloc[:, :0]) # vertical empty assert_frame_equal(df.ix[[],:], df.iloc[:0, :]) # horizontal empty + assert_frame_equal(df.ix[[]], df.iloc[:0, :]) # horizontal empty def test_deprecate_float_indexers(self):
closes #6621
https://api.github.com/repos/pandas-dev/pandas/pulls/7094
2014-05-10T19:31:34Z
2014-05-10T19:49:51Z
2014-05-10T19:49:51Z
2014-07-16T09:05:33Z
BUG: Let DataFrame.quantile() handle datetime
diff --git a/doc/source/release.rst b/doc/source/release.rst index b670e6b5cea05..ec8f44f955043 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -491,6 +491,7 @@ Bug Fixes - Bug in ``unstack`` raises ``ValueError`` when ``MultiIndex`` contains ``PeriodIndex`` (:issue:`4342`) - Bug in ``boxplot`` and ``hist`` draws unnecessary axes (:issue:`6769`) - Regression in ``groupby.nth()`` for out-of-bounds indexers (:issue:`6621`) +- Bug in ``quantile`` with datetime values (:issue:`6965`) pandas 0.13.1 ------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4345154437bf5..66ba061ab35ef 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4188,23 +4188,43 @@ def quantile(self, q=0.5, axis=0, numeric_only=True): """ per = np.asarray(q) * 100 + if not com.is_list_like(per): + per = [per] + q = [q] + squeeze = True + else: + squeeze = False + def f(arr, per): - arr = arr.values - if arr.dtype != np.float_: - arr = arr.astype(float) - arr = arr[notnull(arr)] - if len(arr) == 0: + if arr._is_datelike_mixed_type: + values = _values_from_object(arr).view('i8') + else: + values = arr.astype(float) + values = values[notnull(values)] + if len(values) == 0: return NA else: - return _quantile(arr, per) + return _quantile(values, per) data = self._get_numeric_data() if numeric_only else self - if com.is_list_like(per): - from pandas.tools.merge import concat - return concat([data.apply(f, axis=axis, args=(x,)) for x in per], - axis=1, keys=per/100.).T - else: - return data.apply(f, axis=axis, args=(per,)) + + # need to know which cols are timestamp going in so that we can + # map timestamp over them after getting the quantile. 
+ is_dt_col = data.dtypes.map(com.is_datetime64_dtype) + is_dt_col = is_dt_col[is_dt_col].index + + quantiles = [[f(vals, x) for x in per] + for (_, vals) in data.iteritems()] + result = DataFrame(quantiles, index=data._info_axis, columns=q).T + if len(is_dt_col) > 0: + result[is_dt_col] = result[is_dt_col].applymap(lib.Timestamp) + if squeeze: + if result.shape == (1, 1): + result = result.T.iloc[:, 0] # don't want scalar + else: + result = result.T.squeeze() + result.name = None # For groupby, so it can set an index name + return result def rank(self, axis=0, numeric_only=None, method='average', na_option='keep', ascending=True, pct=False): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 1e803a46d76de..7365e4be187b0 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -10994,6 +10994,25 @@ def test_quantile_multi(self): index=[.1, .9]) assert_frame_equal(result, expected) + def test_quantile_datetime(self): + df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]}) + + # exclude datetime + result = df.quantile(.5) + expected = Series([2.5], index=['b']) + + # datetime + result = df.quantile(.5, numeric_only=False) + expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5], + index=['a', 'b']) + assert_series_equal(result, expected) + + # datetime w/ multi + result = df.quantile([.5], numeric_only=False) + expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]], + index=[.5], columns=['a', 'b']) + assert_frame_equal(result, expected) + def test_cumsum(self): self.tsframe.ix[5:10, 0] = nan self.tsframe.ix[10:15, 1] = nan
Closes https://github.com/pydata/pandas/issues/6965
https://api.github.com/repos/pandas-dev/pandas/pulls/7093
2014-05-10T17:20:15Z
2014-05-10T21:51:19Z
2014-05-10T21:51:19Z
2016-11-03T12:37:57Z
BUG: tz info lost by set_index and reindex
diff --git a/doc/source/release.rst b/doc/source/release.rst index 463cf928660dd..708300bca725d 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -495,6 +495,8 @@ Bug Fixes - Bug in ``boxplot`` and ``hist`` draws unnecessary axes (:issue:`6769`) - Regression in ``groupby.nth()`` for out-of-bounds indexers (:issue:`6621`) - Bug in ``quantile`` with datetime values (:issue:`6965`) +- Bug in ``Dataframe.set_index``, ``reindex`` and ``pivot`` don't preserve ``DatetimeIndex`` and ``PeriodIndex`` attributes (:issue:`3950`, :issue:`5878`, :issue:`6631`) +- Bug in ``MultiIndex.get_level_values`` doesn't preserve ``DatetimeIndex`` and ``PeriodIndex`` attributes (:issue:`7092`) pandas 0.13.1 ------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 66ba061ab35ef..773270ba1d593 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2220,7 +2220,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False, for i in range(self.index.nlevels): arrays.append(self.index.get_level_values(i)) else: - arrays.append(np.asarray(self.index)) + arrays.append(self.index) to_remove = [] for col in keys: @@ -2232,9 +2232,12 @@ def set_index(self, keys, drop=True, append=False, inplace=False, level = col.get_level_values(col.nlevels - 1) names.extend(col.names) - elif isinstance(col, (Series, Index)): + elif isinstance(col, Series): level = col.values names.append(col.name) + elif isinstance(col, Index): + level = col + names.append(col.name) elif isinstance(col, (list, np.ndarray)): level = col names.append(None) diff --git a/pandas/core/index.py b/pandas/core/index.py index c3619b992028d..7a2a160e0dc33 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -114,9 +114,7 @@ def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False, # no class inference! 
if fastpath: - subarr = data.view(cls) - subarr.name = name - return subarr + return cls._simple_new(data, name) from pandas.tseries.period import PeriodIndex if isinstance(data, (np.ndarray, ABCSeries)): @@ -185,6 +183,12 @@ def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False, subarr._set_names([name]) return subarr + @classmethod + def _simple_new(cls, values, name, **kwargs): + result = values.view(cls) + result.name = name + return result + def is_(self, other): """ More flexible, faster check like ``is`` but that works through views @@ -2588,11 +2592,12 @@ def get_level_values(self, level): values : ndarray """ num = self._get_level_number(level) - unique_vals = self.levels[num] # .values + unique = self.levels[num] # .values labels = self.labels[num] - values = Index(com.take_1d(unique_vals.values, labels, - fill_value=unique_vals._na_value)) - values.name = self.names[num] + filled = com.take_1d(unique.values, labels, fill_value=unique._na_value) + values = unique._simple_new(filled, self.names[num], + freq=getattr(unique, 'freq', None), + tz=getattr(unique, 'tz', None)) return values def format(self, space=2, sparsify=None, adjoin=True, names=False, diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 65eadff002eb6..a3a2e6849bce4 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -82,11 +82,10 @@ def __init__(self, values, index, level=-1, value_columns=None): labels = index.labels def _make_index(lev, lab): - if isinstance(lev, PeriodIndex): - i = lev.copy() - else: - i = lev.__class__(_make_index_array_level(lev.values, lab)) - i.name = lev.name + values = _make_index_array_level(lev.values, lab) + i = lev._simple_new(values, lev.name, + freq=getattr(lev, 'freq', None), + tz=getattr(lev, 'tz', None)) return i self.new_index_levels = [_make_index(lev, lab) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index dafbfd07ca51d..f12d3c505741b 100644 --- a/pandas/tests/test_index.py +++ 
b/pandas/tests/test_index.py @@ -180,6 +180,19 @@ def test_index_ctor_infer_periodindex(self): assert_array_equal(rs, xp) tm.assert_isinstance(rs, PeriodIndex) + def test_constructor_simple_new(self): + idx = Index([1, 2, 3, 4, 5], name='int') + result = idx._simple_new(idx, 'int') + self.assert_(result.equals(idx)) + + idx = Index([1.1, np.nan, 2.2, 3.0], name='float') + result = idx._simple_new(idx, 'float') + self.assert_(result.equals(idx)) + + idx = Index(['A', 'B', 'C', np.nan], name='obj') + result = idx._simple_new(idx, 'obj') + self.assert_(result.equals(idx)) + def test_copy(self): i = Index([], name='Foo') i_copy = i.copy() diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 63bace138884f..8a75257a71eaa 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1989,6 +1989,78 @@ def test_datetimeindex(self): self.assert_(idx.levels[0].equals(expected1)) self.assert_(idx.levels[1].equals(idx2)) + def test_set_index_datetime(self): + # GH 3950 + df = pd.DataFrame({'label':['a', 'a', 'a', 'b', 'b', 'b'], + 'datetime':['2011-07-19 07:00:00', '2011-07-19 08:00:00', + '2011-07-19 09:00:00', '2011-07-19 07:00:00', + '2011-07-19 08:00:00', '2011-07-19 09:00:00'], + 'value':range(6)}) + df.index = pd.to_datetime(df.pop('datetime'), utc=True) + df.index = df.index.tz_localize('UTC').tz_convert('US/Pacific') + + expected = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00']) + expected = expected.tz_localize('UTC').tz_convert('US/Pacific') + + df = df.set_index('label', append=True) + self.assert_(df.index.levels[0].equals(expected)) + self.assert_(df.index.levels[1].equals(pd.Index(['a', 'b']))) + + df = df.swaplevel(0, 1) + self.assert_(df.index.levels[0].equals(pd.Index(['a', 'b']))) + self.assert_(df.index.levels[1].equals(expected)) + + + df = DataFrame(np.random.random(6)) + idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00', + '2011-07-19 
09:00:00', '2011-07-19 07:00:00', + '2011-07-19 08:00:00', '2011-07-19 09:00:00'], tz='US/Eastern') + idx2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-01 09:00', '2012-04-01 09:00', + '2012-04-02 09:00', '2012-04-02 09:00', '2012-04-02 09:00'], + tz='US/Eastern') + idx3 = pd.date_range('2011-01-01 09:00', periods=6, tz='Asia/Tokyo') + + df = df.set_index(idx1) + df = df.set_index(idx2, append=True) + df = df.set_index(idx3, append=True) + + expected1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00', + '2011-07-19 09:00:00'], tz='US/Eastern') + expected2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-02 09:00'], tz='US/Eastern') + + self.assert_(df.index.levels[0].equals(expected1)) + self.assert_(df.index.levels[1].equals(expected2)) + self.assert_(df.index.levels[2].equals(idx3)) + + # GH 7092 + self.assert_(df.index.get_level_values(0).equals(idx1)) + self.assert_(df.index.get_level_values(1).equals(idx2)) + self.assert_(df.index.get_level_values(2).equals(idx3)) + + def test_set_index_period(self): + # GH 6631 + df = DataFrame(np.random.random(6)) + idx1 = pd.period_range('2011-01-01', periods=3, freq='M') + idx1 = idx1.append(idx1) + idx2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H') + idx2 = idx2.append(idx2).append(idx2) + idx3 = pd.period_range('2005', periods=6, freq='Y') + + df = df.set_index(idx1) + df = df.set_index(idx2, append=True) + df = df.set_index(idx3, append=True) + + expected1 = pd.period_range('2011-01-01', periods=3, freq='M') + expected2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H') + + self.assert_(df.index.levels[0].equals(expected1)) + self.assert_(df.index.levels[1].equals(expected2)) + self.assert_(df.index.levels[2].equals(idx3)) + + self.assert_(df.index.get_level_values(0).equals(idx1)) + self.assert_(df.index.get_level_values(1).equals(idx2)) + self.assert_(df.index.get_level_values(2).equals(idx3)) + if __name__ == '__main__': diff --git a/pandas/tools/tests/test_pivot.py 
b/pandas/tools/tests/test_pivot.py index ed1f63a42cbae..09fdb5e3fed3e 100644 --- a/pandas/tools/tests/test_pivot.py +++ b/pandas/tools/tests/test_pivot.py @@ -3,7 +3,7 @@ import numpy as np from numpy.testing import assert_equal -import pandas +import pandas as pd from pandas import DataFrame, Series, Index, MultiIndex, Grouper from pandas.tools.merge import concat from pandas.tools.pivot import pivot_table, crosstab @@ -181,6 +181,42 @@ def test_pivot_index_with_nan(self): columns = Index(['C1','C2','C3','C4'],name='b')) tm.assert_frame_equal(result, expected) + def test_pivot_with_tz(self): + # GH 5878 + df = DataFrame({'dt1': [datetime.datetime(2013, 1, 1, 9, 0), + datetime.datetime(2013, 1, 2, 9, 0), + datetime.datetime(2013, 1, 1, 9, 0), + datetime.datetime(2013, 1, 2, 9, 0)], + 'dt2': [datetime.datetime(2014, 1, 1, 9, 0), + datetime.datetime(2014, 1, 1, 9, 0), + datetime.datetime(2014, 1, 2, 9, 0), + datetime.datetime(2014, 1, 2, 9, 0)], + 'data1': range(4), 'data2': range(4)}) + + df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific')) + df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo')) + + exp_col1 = Index(['data1', 'data1', 'data2', 'data2']) + exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'] * 2, + name='dt2', tz='Asia/Tokyo') + exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2]) + expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]], + index=pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'], + name='dt1', tz='US/Pacific'), + columns=exp_col) + + pv = df.pivot(index='dt1', columns='dt2') + tm.assert_frame_equal(pv, expected) + + expected = DataFrame([[0, 2], [1, 3]], + index=pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'], + name='dt1', tz='US/Pacific'), + columns=pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'], + name='dt2', tz='Asia/Tokyo')) + + pv = df.pivot(index='dt1', columns='dt2', values='data1') + tm.assert_frame_equal(pv, expected) + def test_margins(self): 
def _check_output(res, col, index=['A', 'B'], columns=['C']): cmarg = res['All'][:-1] @@ -235,7 +271,7 @@ def test_pivot_integer_columns(self): d = datetime.date.min data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'], [d + datetime.timedelta(i) for i in range(20)], [1.0])) - df = pandas.DataFrame(data) + df = DataFrame(data) table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2]) df2 = df.rename(columns=str) @@ -286,7 +322,7 @@ def test_pivot_columns_lexsorted(self): iproduct = np.random.randint(0, len(products), n) items['Index'] = products['Index'][iproduct] items['Symbol'] = products['Symbol'][iproduct] - dr = pandas.date_range(datetime.date(2000, 1, 1), datetime.date(2010, 12, 31)) + dr = pd.date_range(datetime.date(2000, 1, 1), datetime.date(2010, 12, 31)) dates = dr[np.random.randint(0, len(dr), n)] items['Year'] = dates.year items['Month'] = dates.month diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 01a93b712b42c..113be28f86976 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -669,6 +669,13 @@ def _from_arraylike(cls, data, freq, tz): return data, freq + @classmethod + def _simple_new(cls, values, name, freq=None, **kwargs): + result = values.view(cls) + result.name = name + result.freq = freq + return result + def __contains__(self, key): if not isinstance(key, Period) or key.freq != self.freq: if isinstance(key, compat.string_types): diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 1a72c7925b6ee..4117ca660db35 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -1194,6 +1194,14 @@ def test_constructor_datetime64arr(self): self.assertRaises(ValueError, PeriodIndex, vals, freq='D') + def test_constructor_simple_new(self): + idx = period_range('2007-01', name='p', periods=20, freq='M') + result = idx._simple_new(idx, 'p', freq=idx.freq) + self.assert_(result.equals(idx)) + + result = 
idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq) + self.assert_(result.equals(idx)) + def test_is_(self): create_index = lambda: PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') @@ -1390,6 +1398,17 @@ def test_frame_setitem(self): tm.assert_isinstance(rs.index, PeriodIndex) self.assert_(rs.index.equals(rng)) + def test_period_set_index_reindex(self): + # GH 6631 + df = DataFrame(np.random.random(6)) + idx1 = period_range('2011/01/01', periods=6, freq='M') + idx2 = period_range('2013', periods=6, freq='A') + + df = df.set_index(idx1) + self.assert_(df.index.equals(idx1)) + df = df.reindex(idx2) + self.assert_(df.index.equals(idx2)) + def test_nested_dict_frame_constructor(self): rng = period_range('1/1/2000', periods=5) df = DataFrame(randn(10, 5), columns=rng) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 0c0e7692b7d4c..79fd7cc6421e2 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2510,6 +2510,17 @@ def test_dti_reset_index_round_trip(self): self.assertEquals(df.index[0], stamp) self.assertEquals(df.reset_index()['Date'][0], stamp) + def test_dti_set_index_reindex(self): + # GH 6631 + df = DataFrame(np.random.random(6)) + idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern') + idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo') + + df = df.set_index(idx1) + self.assert_(df.index.equals(idx1)) + df = df.reindex(idx2) + self.assert_(df.index.equals(idx2)) + def test_datetimeindex_union_join_empty(self): dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D') empty = Index([])
Closes #6631. Closes #5878. Regarding #3950, original problem can be fixed by this, but `groupby` problem isn't. Also, this includes the fix for `MultiIndex.get_level_values` doesn't retain `tz` and `freq`, as the method is used in `set_index`. ``` # current master >>> import pandas as pd >>> didx = pd.DatetimeIndex(start='2013/01/01', freq='H', periods=4, tz='Asia/Tokyo') >>> pidx = pd.PeriodIndex(start='2013/01/01', freq='H', periods=4) >>> midx = pd.MultiIndex.from_arrays([didx, pidx]) >>> midx.get_level_values(0) <class 'pandas.tseries.index.DatetimeIndex'> [2012-12-31 15:00:00, ..., 2012-12-31 18:00:00] Length: 4, Freq: None, Timezone: None >>> midx.get_level_values(1) Int64Index([376944, 376945, 376946, 376947], dtype='int64') ``` ``` # after fix >>> midx.get_level_values(0) <class 'pandas.tseries.index.DatetimeIndex'> [2013-01-01 00:00:00+09:00, ..., 2013-01-01 03:00:00+09:00] Length: 4, Freq: H, Timezone: Asia/Tokyo >>> midx.get_level_values(1) PeriodIndex([u'2013-01-01 00:00', u'2013-01-01 01:00', u'2013-01-01 02:00', u'2013-01-01 03:00'], freq='H') ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7092
2014-05-10T15:28:00Z
2014-05-12T13:34:58Z
2014-05-12T13:34:58Z
2014-06-13T19:01:07Z
ENH/CLN: Add factorize to IndexOpsMixin
diff --git a/doc/source/api.rst b/doc/source/api.rst index aa5c58652d550..60e8fc634070e 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -335,6 +335,7 @@ Computations / Descriptive Stats Series.cumsum Series.describe Series.diff + Series.factorize Series.kurt Series.mad Series.max @@ -1040,6 +1041,7 @@ Modifying and Computations Index.diff Index.drop Index.equals + Index.factorize Index.identical Index.insert Index.order diff --git a/doc/source/release.rst b/doc/source/release.rst index 3e6f7bb232156..53abc22cd02f4 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -203,6 +203,7 @@ API Changes ignored (:issue:`6607`) - Produce :class:`~pandas.io.parsers.ParserWarning` on fallback to python parser when no options are ignored (:issue:`6607`) +- Added ``factorize`` functions to ``Index`` and ``Series`` to get indexer and unique values (:issue:`7090`) Deprecations ~~~~~~~~~~~~ @@ -485,6 +486,7 @@ Bug Fixes - Bug in cache coherence with chained indexing and slicing; add ``_is_view`` property to ``NDFrame`` to correctly predict views; mark ``is_copy`` on ``xs` only if its an actual copy (and not a view) (:issue:`7084`) - Bug in DatetimeIndex creation from string ndarray with ``dayfirst=True`` (:issue:`5917`) +- Bug in ``MultiIndex.from_arrays`` created from ``DatetimeIndex`` doesn't preserve ``freq`` and ``tz`` (:issue:`7090`) pandas 0.13.1 ------------- diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index cde6bf3bfd670..7548072f04d1d 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -245,6 +245,7 @@ API changes - add ``inplace`` keyword to ``Series.order/sort`` to make them inverses (:issue:`6859`) - accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`), this was a regression from 0.13.1 +- Added ``factorize`` functions to ``Index`` and ``Series`` to get indexer and unique values (:issue:`7090`) .. 
_whatsnew_0140.sql: diff --git a/pandas/core/base.py b/pandas/core/base.py index 1e9adb60f534e..f614516c87d50 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -319,6 +319,28 @@ def nunique(self): """ return len(self.value_counts()) + def factorize(self, sort=False, na_sentinel=-1): + """ + Encode the object as an enumerated type or categorical variable + + Parameters + ---------- + sort : boolean, default False + Sort by values + na_sentinel: int, default -1 + Value to mark "not found" + + Returns + ------- + labels : the indexer to the original array + uniques : the unique Index + """ + from pandas.core.algorithms import factorize + from pandas.core.index import Index + labels, uniques = factorize(self, sort=sort, na_sentinel=na_sentinel) + uniques = Index(uniques) + return labels, uniques + date = _field_accessor('date','Returns numpy array of datetime.date. The date part of the Timestamps') time = _field_accessor('time','Returns numpy array of datetime.time. The time part of the Timestamps') year = _field_accessor('year', "The year of the datetime") diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index b255831e51ae0..ee6f8f1847258 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -80,8 +80,11 @@ def __init__(self, labels, levels=None, name=None): if levels is None: if name is None: name = getattr(labels, 'name', None) - if isinstance(labels, Index) and hasattr(labels, 'factorize'): - labels, levels = labels.factorize() + if hasattr(labels, 'factorize'): + try: + labels, levels = labels.factorize(sort=True) + except TypeError: + labels, levels = labels.factorize(sort=False) else: try: labels, levels = factorize(labels, sort=True) @@ -103,16 +106,7 @@ def from_array(cls, data): Can be an Index or array-like. The levels are assumed to be the unique values of `data`. 
""" - if isinstance(data, Index) and hasattr(data, 'factorize'): - labels, levels = data.factorize() - else: - try: - labels, levels = factorize(data, sort=True) - except TypeError: - labels, levels = factorize(data, sort=False) - - return Categorical(labels, levels, - name=getattr(data, 'name', None)) + return Categorical(data) _levels = None diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 81b3d4631bfbf..e07b1ff15d26f 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -398,6 +398,48 @@ def test_value_counts_inferred(self): self.assert_numpy_array_equal(td.unique(), expected) self.assertEquals(td.nunique(), 1) + def test_factorize(self): + for o in self.objs: + exp_arr = np.array(range(len(o))) + labels, uniques = o.factorize() + + self.assert_numpy_array_equal(labels, exp_arr) + if isinstance(o, Series): + expected = Index(o.values) + self.assert_numpy_array_equal(uniques, expected) + else: + self.assertTrue(uniques.equals(o)) + + for o in self.objs: + # sort by value, and create duplicates + if isinstance(o, Series): + o.sort() + else: + indexer = o.argsort() + o = o.take(indexer) + n = o[5:].append(o) + + exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + labels, uniques = n.factorize(sort=True) + + self.assert_numpy_array_equal(labels, exp_arr) + if isinstance(o, Series): + expected = Index(o.values) + self.assert_numpy_array_equal(uniques, expected) + else: + self.assertTrue(uniques.equals(o)) + + exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4]) + labels, uniques = n.factorize(sort=False) + self.assert_numpy_array_equal(labels, exp_arr) + + if isinstance(o, Series): + expected = Index(np.concatenate([o.values[5:10], o.values[:5]])) + self.assert_numpy_array_equal(uniques, expected) + else: + expected = o[5:].append(o[:5]) + self.assertTrue(uniques.equals(expected)) + class TestDatetimeIndexOps(Ops): _allowed = '_allow_datetime_index_ops' diff --git 
a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index e4d7ef2f9a8c6..04e9f238d1dbe 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -10,6 +10,7 @@ from pandas.core.categorical import Categorical from pandas.core.index import Index, Int64Index, MultiIndex from pandas.core.frame import DataFrame +from pandas.tseries.period import PeriodIndex from pandas.util.testing import assert_almost_equal import pandas.core.common as com @@ -180,6 +181,37 @@ def test_empty_print(self): "Index([], dtype=object)") self.assertEqual(repr(factor), expected) + def test_periodindex(self): + idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02', + '2014-03', '2014-03'], freq='M') + cat1 = Categorical.from_array(idx1) + + exp_arr = np.array([0, 0, 1, 1, 2, 2]) + exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M') + + self.assert_numpy_array_equal(cat1.labels, exp_arr) + self.assert_(cat1.levels.equals(exp_idx)) + + idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01', + '2014-03', '2014-01'], freq='M') + cat2 = Categorical.from_array(idx2) + + exp_arr = np.array([2, 2, 1, 0, 2, 0]) + + self.assert_numpy_array_equal(cat2.labels, exp_arr) + self.assert_(cat2.levels.equals(exp_idx)) + + idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09', + '2013-08', '2013-07', '2013-05'], freq='M') + cat3 = Categorical.from_array(idx3) + + exp_arr = np.array([6, 5, 4, 3, 2, 1, 0]) + exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09', + '2013-10', '2013-11', '2013-12'], freq='M') + + self.assert_numpy_array_equal(cat3.labels, exp_arr) + self.assert_(cat3.levels.equals(exp_idx)) + if __name__ == '__main__': import nose diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index a6c2bb9f56602..00f7b65f5690e 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1899,6 +1899,17 @@ def test_multiindex_set_index(self): # it works! 
df.set_index(index) + def test_datetimeindex(self): + idx1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'] * 2, tz='Asia/Tokyo') + idx2 = pd.date_range('2010/01/01', periods=6, freq='M', tz='US/Eastern') + idx = MultiIndex.from_arrays([idx1, idx2]) + + expected1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'], tz='Asia/Tokyo') + + self.assert_(idx.levels[0].equals(expected1)) + self.assert_(idx.levels[1].equals(idx2)) + + if __name__ == '__main__': import nose diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index d9018ad92eb17..b318e18fd6481 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -806,6 +806,19 @@ def to_period(self, freq=None): return PeriodIndex(self.values, freq=freq, tz=self.tz) + def factorize(self, sort=False, na_sentinel=-1): + """ + Index.factorize with handling for DatetimeIndex metadata + + Returns + ------- + result : DatetimeIndex + """ + from pandas.core.algorithms import factorize + labels, uniques = factorize(self.asi8, sort=sort, na_sentinel=na_sentinel) + uniques = DatetimeIndex._simple_new(uniques, name=self.name, freq=self.freq, tz=self.tz) + return labels, uniques + def order(self, return_indexer=False, ascending=True): """ Return sorted copy of Index diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 6d9e32433cd1e..01a93b712b42c 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -739,15 +739,6 @@ def is_full(self): values = self.values return ((values[1:] - values[:-1]) < 2).all() - def factorize(self): - """ - Specialized factorize that boxes uniques - """ - from pandas.core.algorithms import factorize - labels, uniques = factorize(self.values) - uniques = PeriodIndex(ordinal=uniques, freq=self.freq) - return labels, uniques - @property def freqstr(self): return self.freq diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index a6326794c1b12..43a4d4ff1239b 100644 
--- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -2175,6 +2175,35 @@ def test_slice_keep_name(self): idx = period_range('20010101', periods=10, freq='D', name='bob') self.assertEqual(idx.name, idx[1:].name) + def test_factorize(self): + idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02', + '2014-03', '2014-03'], freq='M') + + exp_arr = np.array([0, 0, 1, 1, 2, 2]) + exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M') + + arr, idx = idx1.factorize() + self.assert_numpy_array_equal(arr, exp_arr) + self.assert_(idx.equals(exp_idx)) + + arr, idx = idx1.factorize(sort=True) + self.assert_numpy_array_equal(arr, exp_arr) + self.assert_(idx.equals(exp_idx)) + + idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01', + '2014-03', '2014-01'], freq='M') + + exp_arr = np.array([2, 2, 1, 0, 2, 0]) + arr, idx = idx2.factorize(sort=True) + self.assert_numpy_array_equal(arr, exp_arr) + self.assert_(idx.equals(exp_idx)) + + exp_arr = np.array([0, 0, 1, 2, 0, 2]) + exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M') + arr, idx = idx2.factorize() + self.assert_numpy_array_equal(arr, exp_arr) + self.assert_(idx.equals(exp_idx)) + def _permute(obj): return obj.take(np.random.permutation(len(obj))) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 7690f118af482..0c0e7692b7d4c 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2189,6 +2189,51 @@ def test_join_with_period_index(self): 'PeriodIndex-ed objects'): df.columns.join(s.index, how=join) + def test_factorize(self): + idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02', + '2014-02', '2014-03', '2014-03']) + + exp_arr = np.array([0, 0, 1, 1, 2, 2]) + exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03']) + + arr, idx = idx1.factorize() + self.assert_numpy_array_equal(arr, exp_arr) + self.assert_(idx.equals(exp_idx)) + + arr, idx = 
idx1.factorize(sort=True) + self.assert_numpy_array_equal(arr, exp_arr) + self.assert_(idx.equals(exp_idx)) + + # tz must be preserved + idx1 = idx1.tz_localize('Asia/Tokyo') + exp_idx = exp_idx.tz_localize('Asia/Tokyo') + + arr, idx = idx1.factorize() + self.assert_numpy_array_equal(arr, exp_arr) + self.assert_(idx.equals(exp_idx)) + + idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01', + '2014-03', '2014-01']) + + exp_arr = np.array([2, 2, 1, 0, 2, 0]) + exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03']) + arr, idx = idx2.factorize(sort=True) + self.assert_numpy_array_equal(arr, exp_arr) + self.assert_(idx.equals(exp_idx)) + + exp_arr = np.array([0, 0, 1, 2, 0, 2]) + exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01']) + arr, idx = idx2.factorize() + self.assert_numpy_array_equal(arr, exp_arr) + self.assert_(idx.equals(exp_idx)) + + # freq must be preserved + idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo') + exp_arr = np.array([0, 1, 2, 3]) + arr, idx = idx3.factorize() + self.assert_numpy_array_equal(arr, exp_arr) + self.assert_(idx.equals(idx3)) + class TestDatetime64(tm.TestCase): """
As pointed in #7041, I prepared a PR to add `factorize` to `IndexOpsMixin`. As a side benefit, `Multiindex.from_arrays` can preserve original `DatetimeIndex.freq` and `tz`. (Related to #3950 and #6606. These issues are not solved yet because these use different methods to create `MultiIndex`). I would like to confirm following points before adding more tests. - What `Index.factorize` and `Series.factorize` should return as unique values, `ndarray` or `Index`? I think it should return `Index` to preserve `DatetimeIndex` attributes (`freq` and `tz`). - Is this should be added to `api.rst`?
https://api.github.com/repos/pandas-dev/pandas/pulls/7090
2014-05-10T07:10:27Z
2014-05-10T14:12:03Z
2014-05-10T14:12:03Z
2014-06-17T15:25:56Z
BUG: use size attribute (not method call)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 2c7f6c5e181da..b13b2121ac0c4 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -142,6 +142,11 @@ def _last(x): else: return _last(x) + +def _count_compat(x, axis=0): + return x.size + + class Grouper(object): """ A Grouper allows the user to specify a groupby instruction for a target object @@ -721,8 +726,7 @@ def size(self): numeric_only=False, _convert=True) last = _groupby_function('last', 'last', _last_compat, numeric_only=False, _convert=True) - - _count = _groupby_function('_count', 'count', lambda x, axis=0: x.size(), + _count = _groupby_function('_count', 'count', _count_compat, numeric_only=False) def count(self, axis=0): @@ -1386,17 +1390,19 @@ def aggregate(self, values, how, axis=0): if is_numeric_dtype(values.dtype): values = com.ensure_float(values) is_numeric = True + out_dtype = 'f%d' % values.dtype.itemsize else: is_numeric = issubclass(values.dtype.type, (np.datetime64, np.timedelta64)) + out_dtype = 'float64' if is_numeric: values = values.view('int64') else: values = values.astype(object) # will be filled in Cython function - result = np.empty(out_shape, - dtype=np.dtype('f%d' % values.dtype.itemsize)) + result = np.empty(out_shape, dtype=out_dtype) + result.fill(np.nan) counts = np.zeros(self.ngroups, dtype=np.int64) @@ -1441,7 +1447,6 @@ def _aggregate(self, result, counts, values, how, is_numeric): chunk = chunk.squeeze() agg_func(result[:, :, i], counts, chunk, comp_ids) else: - #import ipdb; ipdb.set_trace() # XXX BREAKPOINT agg_func(result, counts, values, comp_ids) return trans_func(result) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 107bc46da49fa..8b957484f0c0d 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -4214,6 +4214,26 @@ def test_lower_int_prec_count(self): name='grp')) tm.assert_frame_equal(result, expected) + def test_count_uses_size_on_exception(self): + class 
RaisingObjectException(Exception): + pass + + class RaisingObject(object): + def __init__(self, msg='I will raise inside Cython'): + super(RaisingObject, self).__init__() + self.msg = msg + + def __eq__(self, other): + # gets called in Cython to check that raising calls the method + raise RaisingObjectException(self.msg) + + df = DataFrame({'a': [RaisingObject() for _ in range(4)], + 'grp': list('ab' * 2)}) + result = df.groupby('grp').count() + expected = DataFrame({'a': [2, 2]}, index=pd.Index(list('ab'), + name='grp')) + tm.assert_frame_equal(result, expected) + def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all()
xref: #7055
https://api.github.com/repos/pandas-dev/pandas/pulls/7089
2014-05-09T19:08:36Z
2014-05-10T15:23:31Z
2014-05-10T15:23:31Z
2014-07-16T09:05:23Z
ENH/API: accept list-like percentiles in describe (WIP)
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 3538652c9bded..ca9751569336c 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -454,6 +454,7 @@ non-null values: series[10:20] = 5 series.nunique() +.. _basics.describe: Summarizing data: describe ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -471,7 +472,13 @@ course): frame.ix[::2] = np.nan frame.describe() -.. _basics.describe: +You can select specific percentiles to include in the output: + +.. ipython:: python + + series.describe(percentiles=[.05, .25, .75, .95]) + +By default, the median is always included. For a non-numerical Series object, `describe` will give a simple summary of the number of unique values and most frequently occurring values: @@ -482,6 +489,7 @@ number of unique values and most frequently occurring values: s = Series(['a', 'a', 'b', 'b', 'a', 'a', np.nan, 'c', 'd', 'a']) s.describe() + There also is a utility function, ``value_range`` which takes a DataFrame and returns a series with the minimum/maximum values in the DataFrame. diff --git a/doc/source/release.rst b/doc/source/release.rst index 946bdcd48cfaf..0aec7d9cb3837 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -204,6 +204,8 @@ API Changes - Produce :class:`~pandas.io.parsers.ParserWarning` on fallback to python parser when no options are ignored (:issue:`6607`) - Added ``factorize`` functions to ``Index`` and ``Series`` to get indexer and unique values (:issue:`7090`) +- :meth:`DataFrame.describe` on a DataFrame with a mix of Timestamp and string like objects + returns a different Index (:issue:`7088`). Previously the index was unintentionally sorted. Deprecations ~~~~~~~~~~~~ @@ -250,6 +252,10 @@ Deprecations - The support for the 'mysql' flavor when using DBAPI connection objects has been deprecated. MySQL will be further supported with SQLAlchemy engines (:issue:`6900`). +- The `percentile_width` keyword argument in :meth:`~DataFrame.describe` has been deprecated. 
+ Use the `percentiles` keyword instead, which takes a list of percentiles to display. The + default output is unchanged. + Prior Version Deprecations/Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -339,6 +345,7 @@ Improvements to existing features - ``boxplot`` now supports ``layout`` keyword (:issue:`6769`) - Regression in the display of a MultiIndexed Series with ``display.max_rows`` is less than the length of the series (:issue:`7101`) +- :meth:`~DataFrame.describe` now accepts an array of percentiles to include in the summary statistics (:issue:`4196`) .. _release.bug_fixes-0.14.0: diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 725033ebcb82b..3c3c5ddcfcee7 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -196,6 +196,8 @@ API changes - accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`), this was a regression from 0.13.1 - Added ``factorize`` functions to ``Index`` and ``Series`` to get indexer and unique values (:issue:`7090`) +- ``describe`` on a DataFrame with a mix of Timestamp and string like objects returns a different Index (:issue:`7088`). + Previously the index was unintentionally sorted. .. _whatsnew_0140.display: @@ -509,6 +511,10 @@ Deprecations - The support for the 'mysql' flavor when using DBAPI connection objects has been deprecated. MySQL will be further supported with SQLAlchemy engines (:issue:`6900`). +- The `percentile_width` keyword argument in :meth:`~DataFrame.describe` has been deprecated. + Use the `percentiles` keyword instead, which takes a list of percentiles to display. The + default output is unchanged. + .. _whatsnew_0140.enhancements: Enhancements @@ -575,6 +581,7 @@ Enhancements - ``CustomBuisnessMonthBegin`` and ``CustomBusinessMonthEnd`` are now available (:issue:`6866`) - :meth:`Series.quantile` and :meth:`DataFrame.quantile` now accept an array of quantiles. 
+- :meth:`~DataFrame.describe` now accepts an array of percentiles to include in the summary statistics (:issue:`4196`) - ``pivot_table`` can now accept ``Grouper`` by ``index`` and ``columns`` keywords (:issue:`6913`) .. ipython:: python diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 773270ba1d593..6747ea6e5f516 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3805,54 +3805,6 @@ def corrwith(self, other, axis=0, drop=False): return correl - def describe(self, percentile_width=50): - """ - Generate various summary statistics of each column, excluding - NaN values. These include: count, mean, std, min, max, and - lower%/50%/upper% percentiles - - Parameters - ---------- - percentile_width : float, optional - width of the desired uncertainty interval, default is 50, - which corresponds to lower=25, upper=75 - - Returns - ------- - DataFrame of summary statistics - """ - numdata = self._get_numeric_data() - - if len(numdata.columns) == 0: - return DataFrame(dict((k, v.describe()) - for k, v in compat.iteritems(self)), - columns=self.columns) - - lb = .5 * (1. - percentile_width / 100.) - ub = 1. 
- lb - - def pretty_name(x): - x *= 100 - if x == int(x): - return '%.0f%%' % x - else: - return '%.1f%%' % x - - destat_columns = ['count', 'mean', 'std', 'min', - pretty_name(lb), '50%', pretty_name(ub), - 'max'] - - destat = [] - - for i in range(len(numdata.columns)): - series = numdata.iloc[:, i] - destat.append([series.count(), series.mean(), series.std(), - series.min(), series.quantile(lb), series.median(), - series.quantile(ub), series.max()]) - - return self._constructor(lmap(list, zip(*destat)), - index=destat_columns, columns=numdata.columns) - #---------------------------------------------------------------------- # ndarray-like stats methods diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b2e7120a21062..9172d174a1354 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -19,7 +19,7 @@ import pandas.core.common as com import pandas.core.datetools as datetools from pandas import compat, _np_version_under1p7 -from pandas.compat import map, zip, lrange, string_types, isidentifier +from pandas.compat import map, zip, lrange, string_types, isidentifier, lmap from pandas.core.common import (isnull, notnull, is_list_like, _values_from_object, _maybe_promote, _maybe_box_datetimelike, ABCSeries, SettingWithCopyError, SettingWithCopyWarning) @@ -3478,6 +3478,154 @@ def _convert_timedeltas(x): return np.abs(self) + _shared_docs['describe'] = """ + Generate various summary statistics, excluding NaN values. + + Parameters + ---------- + percentile_width : float, deprecated + The ``percentile_width`` argument will be removed in a future + version. Use ``percentiles`` instead. + width of the desired uncertainty interval, default is 50, + which corresponds to lower=25, upper=75 + percentiles : array-like, optional + The percentiles to include in the output. Should all + be in the interval [0, 1]. By default `percentiles` is + [.25, .5, .75], returning the 25th, 50th, and 75th percentiles. 
+ + Returns + ------- + summary: %(klass)s of summary statistics + + Notes + ----- + For numeric dtypes the index includes: count, mean, std, min, + max, and lower, 50, and upper percentiles. + + If self is of object dtypes (e.g. timestamps or strings), the output + will include the count, unique, most common, and frequency of the + most common. Timestamps also include the first and last items. + + If multiple values have the highest count, then the + `count` and `most common` pair will be arbitrarily chosen from + among those with the highest count. + """ + + @Appender(_shared_docs['describe'] % _shared_doc_kwargs) + def describe(self, percentile_width=None, percentiles=None): + if self.ndim >= 3: + msg = "describe is not implemented on on Panel or PanelND objects." + raise NotImplementedError(msg) + + if percentile_width is not None and percentiles is not None: + msg = "Cannot specify both 'percentile_width' and 'percentiles.'" + raise ValueError(msg) + if percentiles is not None: + # get them all to be in [0, 1] + percentiles = np.asarray(percentiles) + if (percentiles > 1).any(): + percentiles = percentiles / 100.0 + msg = ("percentiles should all be in the interval [0, 1]. " + "Try {0} instead.") + raise ValueError(msg.format(list(percentiles))) + else: + # only warn if they change the default + if percentile_width is not None: + do_warn = True + else: + do_warn = False + percentile_width = percentile_width or 50 + lb = .5 * (1. - percentile_width / 100.) + ub = 1. - lb + percentiles = np.array([lb, 0.5, ub]) + if do_warn: + msg = ("The `percentile_width` keyword is deprecated. 
" + "Use percentiles={0} instead".format(list(percentiles))) + warnings.warn(msg, FutureWarning) + + # median should always be included + if (percentiles != 0.5).all(): # median isn't included + lh = percentiles[percentiles < .5] + uh = percentiles[percentiles > .5] + percentiles = np.hstack([lh, 0.5, uh]) + + # dtypes: numeric only, numeric mixed, objects only + data = self._get_numeric_data() + if self.ndim > 1: + if len(data._info_axis) == 0: + is_object = True + else: + is_object = False + else: + is_object = not self._is_numeric_mixed_type + + def pretty_name(x): + x *= 100 + if x == int(x): + return '%.0f%%' % x + else: + return '%.1f%%' % x + + def describe_numeric_1d(series, percentiles): + return ([series.count(), series.mean(), series.std(), + series.min()] + + [series.quantile(x) for x in percentiles] + + [series.max()]) + + def describe_categorical_1d(data): + if data.dtype == object: + names = ['count', 'unique'] + objcounts = data.value_counts() + result = [data.count(), len(objcounts)] + if result[1] > 0: + names += ['top', 'freq'] + top, freq = objcounts.index[0], objcounts.iloc[0] + result += [top, freq] + + elif issubclass(data.dtype.type, np.datetime64): + names = ['count', 'unique'] + asint = data.dropna().values.view('i8') + objcounts = compat.Counter(asint) + result = [data.count(), len(objcounts)] + if result[1] > 0: + top, freq = objcounts.most_common(1)[0] + names += ['first', 'last', 'top', 'freq'] + result += [lib.Timestamp(asint.min()), + lib.Timestamp(asint.max()), + lib.Timestamp(top), freq] + + return pd.Series(result, index=names) + + if is_object: + if data.ndim == 1: + return describe_categorical_1d(self) + else: + result = pd.DataFrame(dict((k, describe_categorical_1d(v)) + for k, v in compat.iteritems(self)), + columns=self._info_axis, + index=['count', 'unique', 'first', 'last', + 'top', 'freq']) + # just objects, no datime + if pd.isnull(result.loc['first']).all(): + result = result.drop(['first', 'last'], axis=0) + return 
result + else: + stat_index = (['count', 'mean', 'std', 'min'] + + [pretty_name(x) for x in percentiles] + + ['max']) + if data.ndim == 1: + return pd.Series(describe_numeric_1d(data, percentiles), + index=stat_index) + else: + destat = [] + for i in range(len(data._info_axis)): # BAD + series = data.iloc[:, i] + destat.append(describe_numeric_1d(series, percentiles)) + + return self._constructor(lmap(list, zip(*destat)), + index=stat_index, + columns=data._info_axis) + _shared_docs['pct_change'] = """ Percent change over given number of periods. diff --git a/pandas/core/series.py b/pandas/core/series.py index d4b6039cd375e..d95f8da8097e9 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1267,67 +1267,6 @@ def multi(values, qs): def ptp(self, axis=None, out=None): return _values_from_object(self).ptp(axis, out) - def describe(self, percentile_width=50): - """ - Generate various summary statistics of Series, excluding NaN - values. These include: count, mean, std, min, max, and - lower%/50%/upper% percentiles - - Parameters - ---------- - percentile_width : float, optional - width of the desired uncertainty interval, default is 50, - which corresponds to lower=25, upper=75 - - Returns - ------- - desc : Series - """ - from pandas.compat import Counter - - if self.dtype == object: - names = ['count', 'unique'] - objcounts = Counter(self.dropna().values) - data = [self.count(), len(objcounts)] - if data[1] > 0: - names += ['top', 'freq'] - top, freq = objcounts.most_common(1)[0] - data += [top, freq] - - elif issubclass(self.dtype.type, np.datetime64): - names = ['count', 'unique'] - asint = self.dropna().values.view('i8') - objcounts = Counter(asint) - data = [self.count(), len(objcounts)] - if data[1] > 0: - top, freq = objcounts.most_common(1)[0] - names += ['first', 'last', 'top', 'freq'] - data += [lib.Timestamp(asint.min()), - lib.Timestamp(asint.max()), - lib.Timestamp(top), freq] - else: - - lb = .5 * (1. - percentile_width / 100.) - ub = 1. 
- lb - - def pretty_name(x): - x *= 100 - if x == int(x): - return '%.0f%%' % x - else: - return '%.1f%%' % x - - names = ['count'] - data = [self.count()] - names += ['mean', 'std', 'min', pretty_name(lb), '50%', - pretty_name(ub), 'max'] - data += [self.mean(), self.std(), self.min(), - self.quantile( - lb), self.median(), self.quantile(ub), - self.max()] - - return self._constructor(data, index=names).__finalize__(self) - def corr(self, other, method='pearson', min_periods=None): """ diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 7365e4be187b0..4264f5b7e0931 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -11224,40 +11224,6 @@ def test_rank_na_option(self): assert_almost_equal(ranks0.values, exp0) assert_almost_equal(ranks1.values, exp1) - def test_describe(self): - desc = self.tsframe.describe() - desc = self.mixed_frame.describe() - desc = self.frame.describe() - - def test_describe_percentiles(self): - desc = self.frame.describe(percentile_width=50) - assert '75%' in desc.index - assert '25%' in desc.index - - desc = self.frame.describe(percentile_width=95) - assert '97.5%' in desc.index - assert '2.5%' in desc.index - - def test_describe_no_numeric(self): - df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8, - 'B': ['a', 'b', 'c', 'd'] * 6}) - desc = df.describe() - expected = DataFrame(dict((k, v.describe()) - for k, v in compat.iteritems(df)), - columns=df.columns) - assert_frame_equal(desc, expected) - - df = DataFrame({'time': self.tsframe.index}) - desc = df.describe() - assert(desc.time['first'] == min(self.tsframe.index)) - - def test_describe_empty_int_columns(self): - df = DataFrame([[0, 1], [1, 2]]) - desc = df[df[0] < 0].describe() # works - assert_series_equal(desc.xs('count'), - Series([0, 0], dtype=float, name='count')) - self.assert_(isnull(desc.ix[1:]).all().all()) - def test_axis_aliases(self): f = self.frame diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 
42bb76930d783..57ec9d0eb8981 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -646,6 +646,59 @@ def test_interp_datetime64(self): expected = Series([1., 1., 3.], index=date_range('1/1/2000', periods=3)) assert_series_equal(result, expected) + def test_describe(self): + _ = self.series.describe() + _ = self.ts.describe() + + def test_describe_percentiles(self): + with tm.assert_produces_warning(FutureWarning): + desc = self.series.describe(percentile_width=50) + assert '75%' in desc.index + assert '25%' in desc.index + + with tm.assert_produces_warning(FutureWarning): + desc = self.series.describe(percentile_width=95) + assert '97.5%' in desc.index + assert '2.5%' in desc.index + + def test_describe_objects(self): + s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a']) + result = s.describe() + expected = Series({'count': 7, 'unique': 4, + 'top': 'a', 'freq': 3}, index=result.index) + assert_series_equal(result, expected) + + dt = list(self.ts.index) + dt.append(dt[0]) + ser = Series(dt) + rs = ser.describe() + min_date = min(dt) + max_date = max(dt) + xp = Series({'count': len(dt), + 'unique': len(self.ts.index), + 'first': min_date, 'last': max_date, 'freq': 2, + 'top': min_date}, index=rs.index) + assert_series_equal(rs, xp) + + def test_describe_empty(self): + result = pd.Series().describe() + + self.assertEqual(result['count'], 0) + self.assert_(result.drop('count').isnull().all()) + + nanSeries = Series([np.nan]) + nanSeries.name = 'NaN' + result = nanSeries.describe() + self.assertEqual(result['count'], 0) + self.assert_(result.drop('count').isnull().all()) + + def test_describe_none(self): + noneSeries = Series([None]) + noneSeries.name = 'None' + assert_series_equal(noneSeries.describe(), + Series([0, 0], index=['count', 'unique'])) + + class TestDataFrame(tm.TestCase, Generic): _typ = DataFrame _comparator = lambda self, x, y: assert_frame_equal(x,y) @@ -708,7 +761,6 @@ def test_interp_combo(self): expected 
= Series([1, 2, 3, 4]) assert_series_equal(result, expected) - def test_interp_nan_idx(self): df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]}) df = df.set_index('A') @@ -859,6 +911,115 @@ def test_interp_ignore_all_good(self): result = df[['B', 'D']].interpolate(downcast=None) assert_frame_equal(result, df[['B', 'D']]) + def test_describe(self): + desc = tm.makeDataFrame().describe() + desc = tm.makeMixedDataFrame().describe() + desc = tm.makeTimeDataFrame().describe() + + def test_describe_percentiles(self): + with tm.assert_produces_warning(FutureWarning): + desc = tm.makeDataFrame().describe(percentile_width=50) + assert '75%' in desc.index + assert '25%' in desc.index + + with tm.assert_produces_warning(FutureWarning): + desc = tm.makeDataFrame().describe(percentile_width=95) + assert '97.5%' in desc.index + assert '2.5%' in desc.index + + def test_describe_quantiles_both(self): + with tm.assertRaises(ValueError): + tm.makeDataFrame().describe(percentile_width=50, + percentiles=[25, 75]) + + def test_describe_percentiles_percent_or_raw(self): + df = tm.makeDataFrame() + with tm.assertRaises(ValueError): + df.describe(percentiles=[10, 50, 100]) + + def test_describe_percentiles_equivalence(self): + df = tm.makeDataFrame() + d1 = df.describe() + d2 = df.describe(percentiles=[.25, .75]) + assert_frame_equal(d1, d2) + + def test_describe_percentiles_insert_median(self): + df = tm.makeDataFrame() + d1 = df.describe(percentiles=[.25, .75]) + d2 = df.describe(percentiles=[.25, .5, .75]) + assert_frame_equal(d1, d2) + + # none above + d1 = df.describe(percentiles=[.25, .45]) + d2 = df.describe(percentiles=[.25, .45, .5]) + assert_frame_equal(d1, d2) + + # none below + d1 = df.describe(percentiles=[.75, 1]) + d2 = df.describe(percentiles=[.5, .75, 1]) + assert_frame_equal(d1, d2) + + def test_describe_no_numeric(self): + df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8, + 'B': ['a', 'b', 'c', 'd'] * 6}) + desc = df.describe() + expected = 
DataFrame(dict((k, v.describe()) + for k, v in compat.iteritems(df)), + columns=df.columns) + assert_frame_equal(desc, expected) + + ts = tm.makeTimeSeries() + df = DataFrame({'time': ts.index}) + desc = df.describe() + self.assertEqual(desc.time['first'], min(ts.index)) + + def test_describe_empty_int_columns(self): + df = DataFrame([[0, 1], [1, 2]]) + desc = df[df[0] < 0].describe() # works + assert_series_equal(desc.xs('count'), + Series([0, 0], dtype=float, name='count')) + self.assert_(isnull(desc.ix[1:]).all().all()) + + def test_describe_objects(self): + df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']}) + result = df.describe() + expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]}, + index=['count', 'unique', 'top', 'freq']) + assert_frame_equal(result, expected) + + df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')}) + result = df.describe() + expected = DataFrame({"C1": [4, 4, pd.Timestamp('2010-01-01'), + pd.Timestamp('2010-01-04'), + pd.Timestamp('2010-01-01'), 1]}, + index=['count', 'unique', 'first', 'last', 'top', + 'freq']) + assert_frame_equal(result, expected) + + # mix time and str + df['C2'] = ['a', 'a', 'b', 'c'] + result = df.describe() + # when mix of dateimte / obj the index gets reordered. 
+ expected['C2'] = [4, 3, np.nan, np.nan, 'a', 2] + assert_frame_equal(result, expected) + + # just str + expected = DataFrame({'C2': [4, 3, 'a', 2]}, + index=['count', 'unique', 'top', 'freq']) + result = df[['C2']].describe() + + # mix of time, str, numeric + df['C3'] = [2, 4, 6, 8] + result = df.describe() + expected = DataFrame({"C3": [4., 5., 2.5819889, 2., 3.5, 5., 6.5, 8.]}, + index=['count', 'mean', 'std', 'min', '25%', + '50%', '75%', 'max']) + assert_frame_equal(result, expected) + assert_frame_equal(df.describe(), df[['C3']].describe()) + + assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe()) + assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe()) + def test_no_order(self): _skip_if_no_scipy() s = Series([0, 1, np.nan, 3]) @@ -1053,6 +1214,9 @@ def test_equals(self): df2 = df1.set_index(['floats'], append=True) self.assert_(df3.equals(df2)) + def test_describe_raises(self): + with tm.assertRaises(NotImplementedError): + tm.makePanel().describe() if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 2bb720e1644ad..6e7c9edfc4025 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2219,56 +2219,6 @@ def test_quantile_multi(self): Timestamp('2000-01-10 19:12:00')], index=[.2, .2])) - def test_describe(self): - _ = self.series.describe() - _ = self.ts.describe() - - def test_describe_percentiles(self): - desc = self.series.describe(percentile_width=50) - assert '75%' in desc.index - assert '25%' in desc.index - - desc = self.series.describe(percentile_width=95) - assert '97.5%' in desc.index - assert '2.5%' in desc.index - - def test_describe_objects(self): - s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a']) - result = s.describe() - expected = Series({'count': 7, 'unique': 4, - 'top': 'a', 'freq': 3}, index=result.index) - assert_series_equal(result, 
expected) - - dt = list(self.ts.index) - dt.append(dt[0]) - ser = Series(dt) - rs = ser.describe() - min_date = min(dt) - max_date = max(dt) - xp = Series({'count': len(dt), - 'unique': len(self.ts.index), - 'first': min_date, 'last': max_date, 'freq': 2, - 'top': min_date}, index=rs.index) - assert_series_equal(rs, xp) - - def test_describe_empty(self): - result = self.empty.describe() - - self.assertEqual(result['count'], 0) - self.assert_(result.drop('count').isnull().all()) - - nanSeries = Series([np.nan]) - nanSeries.name = 'NaN' - result = nanSeries.describe() - self.assertEqual(result['count'], 0) - self.assert_(result.drop('count').isnull().all()) - - def test_describe_none(self): - noneSeries = Series([None]) - noneSeries.name = 'None' - assert_series_equal(noneSeries.describe(), - Series([0, 0], index=['count', 'unique'])) - def test_append(self): appendedSeries = self.series.append(self.objSeries) for idx, value in compat.iteritems(appendedSeries):
Closes https://github.com/pydata/pandas/issues/4196 This is for frames. I'm going to refactor this into generic since to cover series / frames. A couple questions: - API wise, I added a new kwarg `percentiles`. For backwards compat, we keep the `percentile_width` kwarg. I changed the default `percentile_width` from 50 to `None` (but the default output is the same) Cases: 1. You specify `percentile_width` and `percentiles` -> `ValueError` 2. You specify neither `percentile_width` nor `percentiles` -> `percentile_width` set to 50 and same as before 3. You specify one of those, everything goes as expected. - I'm accepting either decimals (e.g. [0.25, .5, .75]) or percentages (e.g. [25, 50, 75]). Those two are equivalent in output. Should I move this logic to the `.quantile` to be more consistent? - I'm choosing to not sort the provided percentiles. It's easy for the user to sort, but hard for them to unsort if for some reason they want it in a specific order. I'd rather not add another kwarg.
https://api.github.com/repos/pandas-dev/pandas/pulls/7088
2014-05-09T18:17:16Z
2014-05-14T00:45:22Z
2014-05-14T00:45:22Z
2016-11-03T12:37:59Z
BUG: cache coherence issue with chain indexing and setitem (GH7084)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 8422efd4247d1..5eaa0aa469a3c 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -482,6 +482,8 @@ Bug Fixes were being passed to plotting method - :func:`read_fwf` treats ``None`` in ``colspec`` like regular python slices. It now reads from the beginning or until the end of the line when ``colspec`` contains a ``None`` (previously raised a ``TypeError``) +- Bug in cache coherence with chained indexing and slicing; add ``_is_view`` property to ``NDFrame`` to correctly predict + views; mark ``is_copy`` on ``xs` only if its an actual copy (and not a view) (:issue:`7084`) pandas 0.13.1 ------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ba237e5cd04c4..4345154437bf5 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1632,7 +1632,7 @@ def _ixs(self, i, axis=0): name=label, fastpath=True) # this is a cached value, mark it so - result._set_as_cached(i, self) + result._set_as_cached(label, self) return result diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 01af7534d458d..b2e7120a21062 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1091,6 +1091,11 @@ def _is_cached(self): cacher = getattr(self, '_cacher', None) return cacher is not None + @property + def _is_view(self): + """ boolean : return if I am a view of another array """ + return self._data.is_view + def _maybe_update_cacher(self, clear=False): """ see if we need to update our parent cacher if clear, then clear our cache """ @@ -1372,7 +1377,9 @@ def xs(self, key, axis=0, level=None, copy=None, drop_level=True): result = self[loc] result.index = new_index - result._set_is_copy(self) + # this could be a view + # but only in a single-dtyped view slicable case + result._set_is_copy(self, copy=not result._is_view) return result _xs = xs diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 48d047baaa6c0..de93330f10271 100644 --- 
a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2289,6 +2289,13 @@ def is_datelike_mixed_type(self): self._consolidate_inplace() return any([block.is_datelike for block in self.blocks]) + @property + def is_view(self): + """ return a boolean if we are a single block and are a view """ + if len(self.blocks) == 1: + return self.blocks[0].values.base is not None + return False + def get_bool_data(self, copy=False): """ Parameters diff --git a/pandas/core/series.py b/pandas/core/series.py index 74f038b2bad23..fc9b9ad936351 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -693,6 +693,7 @@ def _set_values(self, key, value): if isinstance(key, Series): key = key.values self._data = self._data.setitem(indexer=key, value=value) + self._maybe_update_cacher() # help out SparseSeries _get_val_at = ndarray.__getitem__ diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index d42babc7cddbe..e36fdffc8cc31 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -2666,9 +2666,10 @@ def test_cache_updating(self): df.index = index # setting via chained assignment - def f(): - df.loc[0]['z'].iloc[0] = 1. - self.assertRaises(com.SettingWithCopyError, f) + # but actually works, since everything is a view + df.loc[0]['z'].iloc[0] = 1. 
+ result = df.loc[(0,0),'z'] + self.assertEqual(result, 1) # correct setting df.loc[(0,0),'z'] = 2 @@ -2710,6 +2711,28 @@ def test_setitem_cache_updating(self): self.assertEqual(df.ix[0,'c'], 0.0) self.assertEqual(df.ix[7,'c'], 1.0) + # GH 7084 + # not updating cache on series setting with slices + out = DataFrame({'A': [0, 0, 0]}, index=date_range('5/7/2014', '5/9/2014')) + df = DataFrame({'C': ['A', 'A', 'A'], 'D': [100, 200, 300]}) + + #loop through df to update out + six = Timestamp('5/7/2014') + eix = Timestamp('5/9/2014') + for ix, row in df.iterrows(): + out[row['C']][six:eix] = out[row['C']][six:eix] + row['D'] + + expected = DataFrame({'A': [600, 600, 600]}, index=date_range('5/7/2014', '5/9/2014')) + assert_frame_equal(out, expected) + assert_series_equal(out['A'], expected['A']) + + out = DataFrame({'A': [0, 0, 0]}, index=date_range('5/7/2014', '5/9/2014')) + for ix, row in df.iterrows(): + out.loc[six:eix,row['C']] += row['D'] + + assert_frame_equal(out, expected) + assert_series_equal(out['A'], expected['A']) + def test_setitem_chained_setfault(self): # GH6026
closes #7084
https://api.github.com/repos/pandas-dev/pandas/pulls/7087
2014-05-09T13:25:30Z
2014-05-09T18:21:49Z
2014-05-09T18:21:49Z
2014-06-26T19:40:48Z
Tidy representation when truncating dfs
diff --git a/doc/source/_static/trunc_after.png b/doc/source/_static/trunc_after.png new file mode 100644 index 0000000000000..950690de8d1ee Binary files /dev/null and b/doc/source/_static/trunc_after.png differ diff --git a/doc/source/_static/trunc_before.png b/doc/source/_static/trunc_before.png new file mode 100644 index 0000000000000..36ac203422e76 Binary files /dev/null and b/doc/source/_static/trunc_before.png differ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index c033debbb6808..d0696a0be156d 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -23,6 +23,8 @@ users upgrade to this version. - :ref:`API Changes <whatsnew_0140.api>` +- :ref:`Groupby API Changes <whatsnew_0140.groupby>` + - :ref:`Performance Improvements <whatsnew_0140.performance>` - :ref:`Prior Deprecations <whatsnew_0140.prior_deprecations>` @@ -216,6 +218,24 @@ API changes Display Changes ~~~~~~~~~~~~~~~ +- The default way of printing large DataFrames has changed. DataFrames + exceeding ``max_rows`` and/or ``max_columns`` are now displayed in a + centrally truncated view, consistent with the printing of a + :class:`pandas.Series` (:issue:`5603`). + + In previous versions, a DataFrame was truncated once the dimension + constraints were reached and an ellipse (...) signaled that part of + the data was cut off. + + .. image:: _static/trunc_before.png + :alt: The previous look of truncate. + + In the current version, large DataFrames are centrally truncated, + showing a preview of head and tail in both dimensions. + + .. image:: _static/trunc_after.png + :alt: The new look. + - allow option ``'truncate'`` for ``display.show_dimensions`` to only show the dimensions if the frame is truncated (:issue:`6547`). 
diff --git a/pandas/core/format.py b/pandas/core/format.py index 49e98fe9911c5..0905640c85ac1 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -1,8 +1,10 @@ + #coding: utf-8 from __future__ import print_function # pylint: disable=W0141 import sys +import re from pandas.core.base import PandasObject from pandas.core.common import adjoin, isnull, notnull @@ -309,38 +311,65 @@ def __init__(self, frame, buf=None, columns=None, col_space=None, else: self.columns = frame.columns + self._chk_truncate() + + def _chk_truncate(self): + from pandas.tools.merge import concat + + truncate_h = self.max_cols and (len(self.columns) > self.max_cols) + truncate_v = self.max_rows and (len(self.frame) > self.max_rows) + + # Cut the data to the information actually printed + max_cols = self.max_cols + max_rows = self.max_rows + frame = self.frame + if truncate_h: + if max_cols > 1: + col_num = (max_cols // 2) + frame = concat( (frame.iloc[:,:col_num],frame.iloc[:,-col_num:]),axis=1 ) + else: + col_num = max_cols + frame = frame.iloc[:,:max_cols] + self.tr_col_num = col_num + if truncate_v: + if max_rows > 1: + row_num = max_rows // 2 + frame = concat( (frame.iloc[:row_num,:],frame.iloc[-row_num:,:]) ) + else: + row_num = max_rows + frame = frame.iloc[:max_rows,:] + self.tr_row_num = row_num + + self.tr_frame = frame + self.truncate_h = truncate_h + self.truncate_v = truncate_v + self.is_truncated = self.truncate_h or self.truncate_v + def _to_str_columns(self): """ Render a DataFrame to a list of columns (as lists of strings). 
""" + _strlen = _strlen_func() + frame = self.tr_frame # may include levels names also - str_index = self._get_formatted_index() - str_columns = self._get_formatted_column_labels() - - _strlen = _strlen_func() + str_index = self._get_formatted_index(frame) - cols_to_show = self.columns[:self.max_cols] - self.truncated_h = truncate_h = self.max_cols and (len(self.columns) > self.max_cols) - self.truncated_v = truncate_v = self.max_rows and (len(self.frame) > self.max_rows) - self.is_truncated = self.truncated_h or self.truncated_v - if truncate_h: - cols_to_show = self.columns[:self.max_cols] - else: - cols_to_show = self.columns + str_columns = self._get_formatted_column_labels(frame) if self.header: stringified = [] - for i, c in enumerate(cols_to_show): - fmt_values = self._format_col(i) + col_headers = frame.columns + for i, c in enumerate(frame): cheader = str_columns[i] - max_colwidth = max(self.col_space or 0, *(_strlen(x) for x in cheader)) + fmt_values = self._format_col(i) + fmt_values = _make_fixed_width(fmt_values, self.justify, - minimum=max_colwidth, - truncated=truncate_v) + minimum=max_colwidth) + max_len = max(np.max([_strlen(x) for x in fmt_values]), max_colwidth) @@ -351,16 +380,47 @@ def _to_str_columns(self): stringified.append(cheader + fmt_values) else: - stringified = [_make_fixed_width(self._format_col(i), self.justify, - truncated=truncate_v) - for i, c in enumerate(cols_to_show)] + stringified = [] + for i, c in enumerate(frame): + formatter = self._get_formatter(i) + fmt_values = self._format_col(i) + fmt_values = _make_fixed_width(fmt_values, self.justify) + + stringified.append(fmt_values) strcols = stringified if self.index: strcols.insert(0, str_index) + + # Add ... 
to signal truncated + truncate_h = self.truncate_h + truncate_v = self.truncate_v + if truncate_h: - strcols.append(([''] * len(str_columns[-1])) - + (['...'] * min(len(self.frame), self.max_rows))) + col_num = self.tr_col_num + col_width = len(strcols[col_num][0]) # infer from column header + strcols.insert(col_num + 1, ['...'.center(col_width)] * (len(str_index))) + if truncate_v: + n_header_rows = len(str_index) - len(frame) + row_num = self.tr_row_num + for ix,col in enumerate(strcols): + cwidth = len(strcols[ix][row_num]) # infer from above row + is_dot_col = False + if truncate_h: + is_dot_col = ix == col_num + 1 + if cwidth > 3 or is_dot_col: + my_str = '...' + else: + my_str = '..' + + if ix == 0: + dot_str = my_str.ljust(cwidth) + elif is_dot_col: + dot_str = my_str.center(cwidth) + else: + dot_str = my_str.rjust(cwidth) + + strcols[ix].insert(row_num + n_header_rows, dot_str) return strcols @@ -510,9 +570,10 @@ def write(buf, frame, column_format, strcols, longtable=False): 'method') def _format_col(self, i): + frame = self.tr_frame formatter = self._get_formatter(i) return format_array( - (self.frame.iloc[:self.max_rows_displayed, i]).get_values(), + (frame.iloc[:, i]).get_values(), formatter, float_format=self.float_format, na_rep=self.na_rep, space=self.col_space ) @@ -533,16 +594,13 @@ def to_html(self, classes=None): raise TypeError('buf is not a file name and it has no write ' ' method') - def _get_formatted_column_labels(self): + def _get_formatted_column_labels(self,frame): from pandas.core.index import _sparsify def is_numeric_dtype(dtype): return issubclass(dtype.type, np.number) - if self.max_cols: - columns = self.columns[:self.max_cols] - else: - columns = self.columns + columns = frame.columns if isinstance(columns, MultiIndex): fmt_columns = columns.format(sparsify=False, adjoin=False) @@ -580,13 +638,10 @@ def has_index_names(self): def has_column_names(self): return _has_names(self.frame.columns) - def _get_formatted_index(self): + def 
_get_formatted_index(self,frame): # Note: this is only used by to_string(), not by to_html(). - if self.max_rows: - index = self.frame.index[:self.max_rows] - else: - index = self.frame.index - columns = self.frame.columns + index = frame.index + columns = frame.columns show_index_names = self.show_index_names and self.has_index_names show_col_names = (self.show_index_names and self.has_column_names) @@ -633,7 +688,7 @@ def __init__(self, formatter, classes=None, max_rows=None, max_cols=None): self.classes = classes self.frame = self.fmt.frame - self.columns = formatter.columns + self.columns = self.fmt.tr_frame.columns self.elements = [] self.bold_rows = self.fmt.kwds.get('bold_rows', False) self.escape = self.fmt.kwds.get('escape', True) @@ -724,6 +779,7 @@ def write_result(self, buf): _put_lines(buf, self.elements) def _write_header(self, indent): + truncate_h = self.fmt.truncate_h if not self.fmt.header: # write nothing return indent @@ -745,9 +801,7 @@ def _column_header(): else: if self.fmt.index: row.append(self.columns.name or '') - row.extend(self.columns[:self.max_cols]) - if len(self.columns) > self.max_cols: - row.append('') + row.extend(self.columns) return row self.write('<thead>', indent) @@ -758,16 +812,13 @@ def _column_header(): if isinstance(self.columns, MultiIndex): template = 'colspan="%d" halign="left"' - # GH3547 - sentinel = com.sentinel_factory() - levels = self.columns.format(sparsify=sentinel, adjoin=False, - names=False) - # Truncate column names - if len(levels[0]) > self.max_cols: - levels = [lev[:self.max_cols] for lev in levels] - truncated = True + if self.fmt.sparsify: + # GH3547 + sentinel = com.sentinel_factory() else: - truncated = False + sentinel = None + levels = self.columns.format(sparsify=sentinel, + adjoin=False, names=False) level_lengths = _get_level_lengths(levels, sentinel) @@ -778,7 +829,6 @@ def _column_header(): name = self.columns.names[lnum] row = [''] * (row_levels - 1) + ['' if name is None else 
com.pprint_thing(name)] - tags = {} j = len(row) for i, v in enumerate(values): @@ -789,9 +839,16 @@ def _column_header(): continue j += 1 row.append(v) - - if truncated: - row.append('') + if truncate_h: + if self.fmt.sparsify and lnum == 0: + ins_col = row_levels + self.fmt.tr_col_num - 1 + row.insert(ins_col, '...') + + for tag in list(tags.keys()): + if tag >= ins_col: + tags[tag+1] = tags.pop(tag) + else: + row.insert(row_levels + self.fmt.tr_col_num, '...') self.write_tr(row, indent, self.indent_delta, tags=tags, header=True) @@ -799,6 +856,9 @@ def _column_header(): col_row = _column_header() align = self.fmt.justify + if truncate_h: + col_row.insert(self.fmt.tr_col_num + 1, '...') + self.write_tr(col_row, indent, self.indent_delta, header=True, align=align) @@ -820,14 +880,13 @@ def _write_body(self, indent): fmt_values = {} for i in range(min(len(self.columns), self.max_cols)): fmt_values[i] = self.fmt._format_col(i) - truncated = (len(self.columns) > self.max_cols) # write values if self.fmt.index: if isinstance(self.frame.index, MultiIndex): self._write_hierarchical_rows(fmt_values, indent) else: - self._write_regular_rows(fmt_values, indent, truncated) + self._write_regular_rows(fmt_values, indent) else: for i in range(len(self.frame)): row = [fmt_values[j][i] for j in range(len(self.columns))] @@ -839,55 +898,62 @@ def _write_body(self, indent): return indent - def _write_regular_rows(self, fmt_values, indent, truncated): - ncols = min(len(self.columns), self.max_cols) - nrows = min(len(self.frame), self.max_rows) + def _write_regular_rows(self, fmt_values, indent): + truncate_h = self.fmt.truncate_h + truncate_v = self.fmt.truncate_v + + ncols = len(self.fmt.tr_frame.columns) + nrows = len(self.fmt.tr_frame) fmt = self.fmt._get_formatter('__index__') if fmt is not None: - index_values = self.frame.index[:nrows].map(fmt) + index_values = self.fmt.tr_frame.index.map(fmt) else: - index_values = self.frame.index[:nrows].format() + index_values = 
self.fmt.tr_frame.index.format() for i in range(nrows): + + if truncate_v and i == (self.fmt.tr_row_num): + str_sep_row = [ '...' for ele in row ] + self.write_tr(str_sep_row, indent, self.indent_delta, tags=None, + nindex_levels=1) + row = [] row.append(index_values[i]) row.extend(fmt_values[j][i] for j in range(ncols)) - if truncated: - row.append('...') - self.write_tr(row, indent, self.indent_delta, tags=None, - nindex_levels=1) - if len(self.frame) > self.max_rows: - row = [''] + (['...'] * ncols) + if truncate_h: + dot_col_ix = self.fmt.tr_col_num + 1 + row.insert(dot_col_ix, '...') self.write_tr(row, indent, self.indent_delta, tags=None, nindex_levels=1) def _write_hierarchical_rows(self, fmt_values, indent): template = 'rowspan="%d" valign="top"' - frame = self.frame - ncols = min(len(self.columns), self.max_cols) - nrows = min(len(self.frame), self.max_rows) - - truncate = (len(frame) > self.max_rows) + truncate_h = self.fmt.truncate_h + truncate_v = self.fmt.truncate_v + frame = self.fmt.tr_frame + ncols = len(frame.columns) + nrows = len(frame) + row_levels = self.frame.index.nlevels - idx_values = frame.index[:nrows].format(sparsify=False, adjoin=False, + idx_values = frame.index.format(sparsify=False, adjoin=False, names=False) idx_values = lzip(*idx_values) if self.fmt.sparsify: - # GH3547 sentinel = com.sentinel_factory() - levels = frame.index[:nrows].format(sparsify=sentinel, + levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False) - # Truncate row names - if truncate: - levels = [lev[:self.max_rows] for lev in levels] level_lengths = _get_level_lengths(levels, sentinel) - for i in range(min(len(frame), self.max_rows)): + for i in range(nrows): + if truncate_v and i == (self.fmt.tr_row_num): + str_sep_row = [ '...' 
] * (len(row) + sparse_offset) + self.write_tr(str_sep_row, indent, self.indent_delta, tags=None) + row = [] tags = {} @@ -905,6 +971,8 @@ def _write_hierarchical_rows(self, fmt_values, indent): row.append(v) row.extend(fmt_values[j][i] for j in range(ncols)) + if truncate_h: + row.insert(row_levels - sparse_offset + self.fmt.tr_col_num, '...') self.write_tr(row, indent, self.indent_delta, tags=tags, nindex_levels=len(levels) - sparse_offset) else: @@ -915,15 +983,11 @@ def _write_hierarchical_rows(self, fmt_values, indent): row = [] row.extend(idx_values[i]) row.extend(fmt_values[j][i] for j in range(ncols)) + if truncate_h: + row.insert(row_levels + self.fmt.tr_col_num, '...') self.write_tr(row, indent, self.indent_delta, tags=None, nindex_levels=frame.index.nlevels) - # Truncation markers (...) - if truncate: - row = ([''] * frame.index.nlevels) + (['...'] * ncols) - self.write_tr(row, indent, self.indent_delta, tags=None) - - def _get_level_lengths(levels, sentinel=''): from itertools import groupby @@ -1877,8 +1941,7 @@ def impl(x): return impl -def _make_fixed_width(strings, justify='right', minimum=None, truncated=False): - +def _make_fixed_width(strings, justify='right', minimum=None): if len(strings) == 0 or justify == 'all': return strings @@ -1909,9 +1972,6 @@ def just(x): result = [just(x) for x in strings] - if truncated: - result.append(justfunc('...'[:max_len], max_len)) - return result diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index f61bda686c88b..61d2de458fdc9 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -1,5 +1,6 @@ from __future__ import print_function # -*- coding: utf-8 -*- +import re from pandas.compat import range, zip, lrange, StringIO, PY3, lzip, u import pandas.compat as compat @@ -45,12 +46,25 @@ def has_non_verbose_info_repr(df): return has_info and nv def has_horizontally_truncated_repr(df): + try: # Check header row + fst_line = np.array(repr(df).splitlines()[0].split()) + 
cand_col = np.where(fst_line=='...')[0][0] + except: + return False + # Make sure each row has this ... in the same place r = repr(df) - return any(l.strip().endswith('...') for l in r.splitlines()) + for ix,l in enumerate(r.splitlines()): + if not r.split()[cand_col] == '...': + return False + return True def has_vertically_truncated_repr(df): r = repr(df) - return '..' in r.splitlines()[-3] + only_dot_row = False + for row in r.splitlines(): + if re.match('^[\.\ ]+$',row): + only_dot_row = True + return only_dot_row def has_truncated_repr(df): return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df) @@ -382,6 +396,40 @@ def test_to_string_with_col_space(self): c30 = len(df.to_string(col_space=30).split("\n")[1]) self.assertTrue(c10 < c20 < c30) + def test_to_string_truncate_indices(self): + for index in [ tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex, + tm.makeDateIndex, tm.makePeriodIndex ]: + for column in [ tm.makeStringIndex ]: + for h in [10,20]: + for w in [10,20]: + with option_context("display.expand_frame_repr",False): + df = DataFrame(index=index(h), columns=column(w)) + with option_context("display.max_rows", 15): + if h == 20: + self.assertTrue(has_vertically_truncated_repr(df)) + else: + self.assertFalse(has_vertically_truncated_repr(df)) + with option_context("display.max_columns", 15): + if w == 20: + print(df) + print(repr(df)) + self.assertTrue(has_horizontally_truncated_repr(df)) + else: + self.assertFalse(has_horizontally_truncated_repr(df)) + with option_context("display.max_rows", 15,"display.max_columns", 15): + if h == 20 and w == 20: + self.assertTrue(has_doubly_truncated_repr(df)) + else: + self.assertFalse(has_doubly_truncated_repr(df)) + + def test_to_string_truncate_multilevel(self): + arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], + ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] + df = pd.DataFrame(index=arrays,columns=arrays) + with option_context("display.max_rows", 
7,"display.max_columns", 7): + self.assertTrue(has_doubly_truncated_repr(df)) + + def test_to_html_with_col_space(self): def check_with_width(df, col_space): import re @@ -735,6 +783,338 @@ def test_to_html_regression_GH6098(self): # it works df.pivot_table(index=[u('clé1')], columns=[u('clé2')])._repr_html_() + + + + + def test_to_html_truncate(self): + index = pd.DatetimeIndex(start='20010101',freq='D',periods=20) + df = pd.DataFrame(index=index,columns=range(20)) + fmt.set_option('display.max_rows',8) + fmt.set_option('display.max_columns',4) + result = df._repr_html_() + expected = '''\ +<div style="max-height:1000px;max-width:1500px;overflow:auto;"> +<table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>0</th> + <th>1</th> + <th>...</th> + <th>18</th> + <th>19</th> + </tr> + </thead> + <tbody> + <tr> + <th>2001-01-01</th> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>2001-01-02</th> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>2001-01-03</th> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>2001-01-04</th> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>...</th> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + </tr> + <tr> + <th>2001-01-17</th> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>2001-01-18</th> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>2001-01-19</th> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>2001-01-20</th> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + </tr> + </tbody> +</table> +<p>20 rows × 20 columns</p> +</div>''' + if sys.version_info[0] < 3: + expected = 
expected.decode('utf-8') + self.assertEqual(result, expected) + + def test_to_html_truncate_multi_index(self): + arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], + ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] + df = pd.DataFrame(index=arrays,columns=arrays) + fmt.set_option('display.max_rows',7) + fmt.set_option('display.max_columns',7) + result = df._repr_html_() + expected = '''\ +<div style="max-height:1000px;max-width:1500px;overflow:auto;"> +<table border="1" class="dataframe"> + <thead> + <tr> + <th></th> + <th></th> + <th colspan="2" halign="left">bar</th> + <th>baz</th> + <th>...</th> + <th>foo</th> + <th colspan="2" halign="left">qux</th> + </tr> + <tr> + <th></th> + <th></th> + <th>one</th> + <th>two</th> + <th>one</th> + <th>...</th> + <th>two</th> + <th>one</th> + <th>two</th> + </tr> + </thead> + <tbody> + <tr> + <th rowspan="2" valign="top">bar</th> + <th>one</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>two</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>baz</th> + <th>one</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + <td>...</td> + </tr> + <tr> + <th>foo</th> + <th>two</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th rowspan="2" valign="top">qux</th> + <th>one</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>two</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + </tbody> +</table> +<p>8 rows 
× 8 columns</p> +</div>''' + if sys.version_info[0] < 3: + expected = expected.decode('utf-8') + self.assertEqual(result, expected) + + def test_to_html_truncate_multi_index_sparse_off(self): + arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], + ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] + df = pd.DataFrame(index=arrays,columns=arrays) + fmt.set_option('display.max_rows',7) + fmt.set_option('display.max_columns',7) + fmt.set_option('display.multi_sparse',False) + result = df._repr_html_() + expected = '''\ +<div style="max-height:1000px;max-width:1500px;overflow:auto;"> +<table border="1" class="dataframe"> + <thead> + <tr> + <th></th> + <th></th> + <th>bar</th> + <th>bar</th> + <th>baz</th> + <th>...</th> + <th>foo</th> + <th>qux</th> + <th>qux</th> + </tr> + <tr> + <th></th> + <th></th> + <th>one</th> + <th>two</th> + <th>one</th> + <th>...</th> + <th>two</th> + <th>one</th> + <th>two</th> + </tr> + </thead> + <tbody> + <tr> + <th>bar</th> + <th>one</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>bar</th> + <th>two</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>baz</th> + <th>one</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>foo</th> + <th>two</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>qux</th> + <th>one</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + <tr> + <th>qux</th> + <th>two</th> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + <td>...</td> + <td> NaN</td> + <td> NaN</td> + <td> NaN</td> + </tr> + </tbody> +</table> +<p>8 rows × 8 columns</p> +</div>''' + if sys.version_info[0] < 3: + 
expected = expected.decode('utf-8') + self.assertEqual(result, expected) + + + def test_nonunicode_nonascii_alignment(self): df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]]) rep_str = df.to_string() @@ -1505,14 +1885,14 @@ def test_repr_html_long(self): h = max_rows - 1 df = pandas.DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)}) reg_repr = df._repr_html_() - assert '...' not in reg_repr - assert str(40 + h) in reg_repr + assert '..' not in reg_repr + assert str(41 + max_rows // 2) in reg_repr h = max_rows + 1 df = pandas.DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)}) long_repr = df._repr_html_() - assert '...' in long_repr - assert str(40 + h) not in long_repr + assert '..' in long_repr + assert str(41 + max_rows // 2) not in long_repr assert u('%d rows ') % h in long_repr assert u('2 columns') in long_repr @@ -1521,14 +1901,14 @@ def test_repr_html_float(self): h = max_rows - 1 df = pandas.DataFrame({'idx':np.linspace(-10,10,h), 'A':np.arange(1,1+h), 'B': np.arange(41, 41+h) }).set_index('idx') reg_repr = df._repr_html_() - assert '...' not in reg_repr + assert '..' not in reg_repr assert str(40 + h) in reg_repr h = max_rows + 1 df = pandas.DataFrame({'idx':np.linspace(-10,10,h), 'A':np.arange(1,1+h), 'B': np.arange(41, 41+h) }).set_index('idx') long_repr = df._repr_html_() - assert '...' in long_repr - assert str(40 + h) not in long_repr + assert '..' in long_repr + assert '31' not in long_repr assert u('%d rows ') % h in long_repr assert u('2 columns') in long_repr @@ -1575,7 +1955,7 @@ def test_info_repr(self): # Wide h, w = max_rows-1, max_cols+1 df = pandas.DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) - assert has_vertically_truncated_repr(df) + assert has_horizontally_truncated_repr(df) with option_context('display.large_repr', 'info'): assert has_info_repr(df)
This PR closes #5603 Dataframes are now truncated centrally (similar to pd.Series). Before: ![trunc_before](https://cloud.githubusercontent.com/assets/832380/2991707/9050ffe4-dc82-11e3-8a2c-083b09dae672.png) After: ![trunc_after](https://cloud.githubusercontent.com/assets/832380/3000645/649bf09e-dd26-11e3-889e-33a052b13554.png) Ipython notebook: Simple Index: ![notebook_trunc](https://cloud.githubusercontent.com/assets/832380/2991894/0031dd44-dc86-11e3-8d1c-8dc8086edac4.png) MultiLevel Index: ![notebook_multi](https://cloud.githubusercontent.com/assets/832380/2991895/05975656-dc86-11e3-886a-c811f4fc3fe5.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/7086
2014-05-09T13:06:30Z
2014-05-16T19:44:16Z
2014-05-16T19:44:16Z
2014-06-17T06:37:31Z
adding a NotImplementedError for simultaneous use of nrows and chunksize...
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index cdc35564cce61..db32f90377d8f 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -86,7 +86,8 @@ Enhancements - Add ``dropna`` argument to ``value_counts`` and ``nunique`` (:issue:`5569`). - +- Add ``NotImplementedError`` for simultaneous use of ``chunksize`` and ``nrows`` + for read_csv() (:issue:`6774`). diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index bd53caf98f6b2..22fe3ef16e34d 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -227,7 +227,10 @@ def _read(filepath_or_buffer, kwds): # Create the parser. parser = TextFileReader(filepath_or_buffer, **kwds) - if nrows is not None: + if (nrows is not None) and (chunksize is not None): + raise NotImplementedError("'nrows' and 'chunksize' can not be used" + " together yet.") + elif nrows is not None: return parser.read(nrows) elif chunksize or iterator: return parser diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index 16449b317d0b6..c02a3172f4adc 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -1973,13 +1973,6 @@ def test_header_names_backward_compat(self): header=0) tm.assert_frame_equal(result, expected) - def test_integer_overflow_bug(self): - # #2601 - data = "65248E10 11\n55555E55 22\n" - - result = self.read_csv(StringIO(data), header=None, sep=' ') - self.assertTrue(result[0].dtype == np.float64) - def test_int64_min_issues(self): # #2599 data = 'A,B\n0,0\n0,' @@ -2141,6 +2134,10 @@ def test_ignore_leading_whitespace(self): expected = DataFrame({'a':[1,4,7], 'b':[2,5,8], 'c': [3,6,9]}) tm.assert_frame_equal(result, expected) + def test_nrows_and_chunksize_raises_notimplemented(self): + data = 'a b c' + self.assertRaises(NotImplementedError, self.read_csv, StringIO(data), + nrows=10, chunksize=5) class TestPythonParser(ParserTests, tm.TestCase):
..., as the user intention most likely is to get a TextFileReader, when using the chunksize option. Fixes #6774
https://api.github.com/repos/pandas-dev/pandas/pulls/7085
2014-05-09T01:24:13Z
2014-06-24T13:16:52Z
2014-06-24T13:16:52Z
2014-06-24T13:16:58Z
SQL: datetime writing: add tests + issue with pymysql
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 9212e72017f85..3299dd3d70c92 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -29,9 +29,9 @@ from datetime import datetime from pandas import DataFrame, Series, Index, MultiIndex, isnull -from pandas import to_timedelta +from pandas import date_range, to_datetime, to_timedelta import pandas.compat as compat -from pandas.compat import StringIO, range, lrange +from pandas.compat import StringIO, range, lrange, string_types from pandas.core.datetools import format as date_format import pandas.io.sql as sql @@ -870,6 +870,29 @@ def test_date_parsing(self): self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") + def test_datetime(self): + if self.driver == 'pymysql': + raise nose.SkipTest('writing datetime not working with pymysql') + + df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3), + 'B': np.arange(3.0)}) + df.to_sql('test_datetime', self.conn) + + # with read_table -> type information from schema used + result = sql.read_sql_table('test_datetime', self.conn) + result = result.drop('index', axis=1) + tm.assert_frame_equal(result, df) + + # with read_sql -> no type information -> sqlite has no native + result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn) + result = result.drop('index', axis=1) + if self.flavor == 'sqlite': + self.assertTrue(isinstance(result.loc[0, 'A'], string_types)) + result['A'] = to_datetime(result['A']) + tm.assert_frame_equal(result, df) + else: + tm.assert_frame_equal(result, df) + def test_mixed_dtype_insert(self): # see GH6509 s1 = Series(2**25 + 1,dtype=np.int32) @@ -895,7 +918,7 @@ def connect(self): def setup_driver(self): # sqlite3 is built-in - pass + self.driver = None def tearDown(self): # in memory so tables should not be removed explicitly
There were apparantly not yet tests for writing datetime data. So added a test case for both query/table. For MySQL using the pymysql driver, writing a datetime column is not working ... It gives following error: ``` In [65]: import pymysql In [66]: engine_mysql = sqlalchemy.create_engine('mysql+pymysql://root@localhost/pandas_nosetest') In [67]: df = pd.DataFrame({'A': pd.date_range('2013-01-01 09:00:00', periods=3), ...: 'B': np.arange(3.0)}) In [68]: df.to_sql('test_datetime', engine_mysql, if_exists='replace') Traceback (most recent call last): File "<ipython-input-68-95961c7ce232>", line 1, in <module> df.to_sql('test_datetime', engine_mysql, if_exists='replace') ... File "C:\Anaconda\envs\devel\lib\site-packages\pymysql\converters.py", line 24, in escape_item encoder = encoders[type(val)] KeyError: <class 'pandas.tslib.Timestamp'> ``` The data are feeded to the database using `df.itertuples()` and then `np.asscalar()`, and apparantly this gives data of the `Timestamp` class. And for some reason the other drivers can handle this (because it is a subclass of datetime.datetime?), and pymysql not. Which is not good...
https://api.github.com/repos/pandas-dev/pandas/pulls/7082
2014-05-08T21:40:56Z
2014-05-11T10:52:47Z
2014-05-11T10:52:47Z
2014-06-17T10:50:08Z
BUG: read_fwf colspec should treat None like slice
diff --git a/doc/source/io.rst b/doc/source/io.rst index 1aa6dde2c08b4..a0807088b2cf5 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -809,6 +809,8 @@ two extra parameters: String value 'infer' can be used to instruct the parser to try detecting the column specifications from the first 100 rows of the data. Default behaviour, if not specified, is to infer. + As with regular python slices, you can slice to the end of the line + with ``None``, e.g. ``colspecs = [(0, 1), (1, None)]``. - ``widths``: A list of field widths which can be used instead of 'colspecs' if the intervals are contiguous. diff --git a/doc/source/release.rst b/doc/source/release.rst index 1d48674727d51..8422efd4247d1 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -479,6 +479,9 @@ Bug Fixes claim that they contained all the things (:issue:`7066`). - Bug in ``DataFrame.boxplot`` where it failed to use the axis passed as the ``ax`` argument (:issue:`3578`) - Bug in the ``XlsxWriter`` and ``XlwtWriter`` implementations that resulted in datetime columns being formatted without the time (:issue:`7075`) + were being passed to plotting method +- :func:`read_fwf` treats ``None`` in ``colspec`` like regular python slices. 
It now reads from the beginning + or until the end of the line when ``colspec`` contains a ``None`` (previously raised a ``TypeError``) pandas 0.13.1 ------------- diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 4898fabfcd2b4..bd53caf98f6b2 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -2232,10 +2232,11 @@ def __init__(self, f, colspecs, delimiter, comment): "input was a %r" % type(colspecs).__name__) for colspec in self.colspecs: + if not (isinstance(colspec, (tuple, list)) and len(colspec) == 2 and - isinstance(colspec[0], (int, np.integer)) and - isinstance(colspec[1], (int, np.integer))): + isinstance(colspec[0], (int, np.integer, type(None))) and + isinstance(colspec[1], (int, np.integer, type(None)))): raise TypeError('Each column specification must be ' '2 element tuple or list of integers') diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index 2a31eb9608001..ab9912d9b20bb 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -2326,6 +2326,33 @@ def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self): 'Each column specification must be.+'): read_fwf(StringIO(self.data1), [('a', 1)]) + def test_fwf_colspecs_None(self): + # GH 7079 + data = """\ +123456 +456789 +""" + colspecs = [(0, 3), (3, None)] + result = read_fwf(StringIO(data), colspecs=colspecs, header=None) + expected = DataFrame([[123, 456], [456, 789]]) + tm.assert_frame_equal(result, expected) + + colspecs = [(None, 3), (3, 6)] + result = read_fwf(StringIO(data), colspecs=colspecs, header=None) + expected = DataFrame([[123, 456], [456, 789]]) + tm.assert_frame_equal(result, expected) + + colspecs = [(0, None), (3, None)] + result = read_fwf(StringIO(data), colspecs=colspecs, header=None) + expected = DataFrame([[123456, 456], [456789, 789]]) + tm.assert_frame_equal(result, expected) + + colspecs = [(None, None), (3, 6)] + result = read_fwf(StringIO(data), colspecs=colspecs, header=None) + 
expected = DataFrame([[123456, 456], [456789, 789]]) + tm.assert_frame_equal(result, expected) + + def test_fwf_regression(self): # GH 3594 #### turns out 'T060' is parsable as a datetime slice!
Closes https://github.com/pydata/pandas/issues/7079
https://api.github.com/repos/pandas-dev/pandas/pulls/7081
2014-05-08T21:08:46Z
2014-05-09T00:30:13Z
2014-05-09T00:30:13Z
2016-11-03T12:37:56Z
DOC: Remove newline in @verbatim code-block.
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index d6c17a3066b86..4bca2f4a9d4c8 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -83,7 +83,6 @@ will be completed: @verbatim In [1]: df2.<TAB> - df2.A df2.boxplot df2.abs df2.C df2.add df2.clip
This makes the tab-completed code show up in the rendered documentation.
https://api.github.com/repos/pandas-dev/pandas/pulls/7078
2014-05-08T18:55:44Z
2014-05-08T19:42:27Z
2014-05-08T19:42:27Z
2014-07-16T09:05:01Z
SQL: resolve legacy mode + deprecate mysql flavor (GH6900)
diff --git a/doc/source/io.rst b/doc/source/io.rst index 1aa6dde2c08b4..60a1ab01882a8 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -3159,9 +3159,9 @@ your database. .. versionadded:: 0.14.0 - -If SQLAlchemy is not installed a legacy fallback is provided for sqlite and mysql. -These legacy modes require Python database adapters which respect the `Python +If SQLAlchemy is not installed, a fallback is only provided for sqlite (and +for mysql for backwards compatibility, but this is deprecated). +This mode requires a Python database adapter which respect the `Python DB-API <http://www.python.org/dev/peps/pep-0249/>`__. See also some :ref:`cookbook examples <cookbook.sql>` for some advanced strategies. @@ -3335,9 +3335,14 @@ Engine connection examples engine = create_engine('sqlite:////absolute/path/to/foo.db') -Legacy -~~~~~~ -To use the sqlite support without SQLAlchemy, you can create connections like so: +Sqlite fallback +~~~~~~~~~~~~~~~ + +The use of sqlite is supported without using SQLAlchemy. +This mode requires a Python database adapter which respect the `Python +DB-API <http://www.python.org/dev/peps/pep-0249/>`__. + +You can create connections like so: .. code-block:: python @@ -3345,14 +3350,13 @@ To use the sqlite support without SQLAlchemy, you can create connections like so from pandas.io import sql cnx = sqlite3.connect(':memory:') -And then issue the following queries, remembering to also specify the flavor of SQL -you are using. +And then issue the following queries: .. code-block:: python - data.to_sql('data', cnx, flavor='sqlite') + data.to_sql('data', cnx) - sql.read_sql("SELECT * FROM data", cnx, flavor='sqlite') + sql.read_sql("SELECT * FROM data", cnx) .. _io.bigquery: diff --git a/doc/source/release.rst b/doc/source/release.rst index 728dddbe8b979..245c7492bffb9 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -246,6 +246,9 @@ Deprecations positional argument ``frame`` instead of ``data``. 
A ``FutureWarning`` is raised if the old ``data`` argument is used by name. (:issue:`6956`) +- The support for the 'mysql' flavor when using DBAPI connection objects has been deprecated. + MySQL will be further supported with SQLAlchemy engines (:issue:`6900`). + Prior Version Deprecations/Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index cde6bf3bfd670..18e84426c6005 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -475,6 +475,9 @@ Deprecations returned if possible, otherwise a copy will be made. Previously the user could think that ``copy=False`` would ALWAYS return a view. (:issue:`6894`) +- The support for the 'mysql' flavor when using DBAPI connection objects has been deprecated. + MySQL will be further supported with SQLAlchemy engines (:issue:`6900`). + .. _whatsnew_0140.enhancements: Enhancements diff --git a/pandas/io/sql.py b/pandas/io/sql.py index c18a4aef5355b..7a604dcdaba5f 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -76,7 +76,7 @@ def _parse_date_columns(data_frame, parse_dates): return data_frame -def execute(sql, con, cur=None, params=None, flavor='sqlite'): +def execute(sql, con, cur=None, params=None): """ Execute the given SQL query using the provided connection object. @@ -84,24 +84,22 @@ def execute(sql, con, cur=None, params=None, flavor='sqlite'): ---------- sql : string Query to be executed - con : SQLAlchemy engine or DBAPI2 connection (legacy mode) + con : SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. - If a DBAPI2 object, a supported SQL flavor must also be provided + If a DBAPI2 object, only sqlite3 is supported. cur : depreciated, cursor is obtained from connection params : list or tuple, optional List of parameters to pass to execute method. - flavor : string "sqlite", "mysql" - Specifies the flavor of SQL to use. - Ignored when using SQLAlchemy engine. 
Required when using DBAPI2 connection. + Returns ------- Results Iterable """ if cur is None: - pandas_sql = pandasSQL_builder(con, flavor=flavor) + pandas_sql = pandasSQL_builder(con) else: - pandas_sql = pandasSQL_builder(cur, flavor=flavor, is_cursor=True) + pandas_sql = pandasSQL_builder(cur, is_cursor=True) args = _convert_params(sql, params) return pandas_sql.execute(*args) @@ -235,7 +233,7 @@ def read_sql_table(table_name, con, meta=None, index_col=None, table_name : string Name of SQL table in database con : SQLAlchemy engine - Legacy mode not supported + Sqlite DBAPI conncection mode not supported meta : SQLAlchemy meta, optional If omitted MetaData is reflected from engine index_col : string, optional @@ -277,8 +275,8 @@ def read_sql_table(table_name, con, meta=None, index_col=None, raise ValueError("Table %s not found" % table_name, con) -def read_sql_query(sql, con, index_col=None, flavor='sqlite', - coerce_float=True, params=None, parse_dates=None): +def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, + parse_dates=None): """Read SQL query into a DataFrame. Returns a DataFrame corresponding to the result set of the query @@ -289,15 +287,12 @@ def read_sql_query(sql, con, index_col=None, flavor='sqlite', ---------- sql : string SQL query to be executed - con : SQLAlchemy engine or DBAPI2 connection (legacy mode) + con : SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. - If a DBAPI2 object is given, a supported SQL flavor must also be provided + If a DBAPI2 object, only sqlite3 is supported. index_col : string, optional column name to use for the returned DataFrame object. - flavor : string, {'sqlite', 'mysql'} - The flavor of SQL to use. Ignored when using - SQLAlchemy engine. Required when using DBAPI2 connection. 
coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets @@ -324,7 +319,7 @@ def read_sql_query(sql, con, index_col=None, flavor='sqlite', read_sql """ - pandas_sql = pandasSQL_builder(con, flavor=flavor) + pandas_sql = pandasSQL_builder(con) return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) @@ -342,12 +337,13 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, con : SQLAlchemy engine or DBAPI2 connection (legacy mode) Using SQLAlchemy makes it possible to use any DB supported by that library. - If a DBAPI2 object is given, a supported SQL flavor must also be provided + If a DBAPI2 object, only sqlite3 is supported. index_col : string, optional column name to use for the returned DataFrame object. flavor : string, {'sqlite', 'mysql'} The flavor of SQL to use. Ignored when using SQLAlchemy engine. Required when using DBAPI2 connection. + 'mysql' is still supported, but will be removed in future versions. coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets @@ -417,13 +413,14 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, frame : DataFrame name : string Name of SQL table - con : SQLAlchemy engine or DBAPI2 connection (legacy mode) + con : SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. - If a DBAPI2 object is given, a supported SQL flavor must also be provided + If a DBAPI2 object, only sqlite3 is supported. flavor : {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored when using SQLAlchemy engine. Required when using DBAPI2 connection. + 'mysql' is still supported, but will be removed in future versions. 
if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. @@ -458,13 +455,14 @@ def has_table(table_name, con, flavor='sqlite'): ---------- table_name: string Name of SQL table - con: SQLAlchemy engine or DBAPI2 connection (legacy mode) + con: SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. - If a DBAPI2 object is given, a supported SQL flavor name must also be provided + If a DBAPI2 object, only sqlite3 is supported. flavor: {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored when using SQLAlchemy engine. Required when using DBAPI2 connection. + 'mysql' is still supported, but will be removed in future versions. Returns ------- @@ -476,6 +474,10 @@ def has_table(table_name, con, flavor='sqlite'): table_exists = has_table +_MYSQL_WARNING = ("The 'mysql' flavor with DBAPI connection is deprecated " + "and will be removed in future versions. 
" + "MySQL will be further supported with SQLAlchemy engines.") + def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False): """ Convenience function to return the correct PandasSQL subclass based on the @@ -489,21 +491,14 @@ def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False): if isinstance(con, sqlalchemy.engine.Engine): return PandasSQLAlchemy(con, meta=meta) else: - warnings.warn("Not an SQLAlchemy engine, " - "attempting to use as legacy DBAPI connection") - if flavor is None: - raise ValueError( - "PandasSQL must be created with an SQLAlchemy engine " - "or a DBAPI2 connection and SQL flavor") - else: - return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) + if flavor == 'mysql': + warnings.warn(_MYSQL_WARNING, FutureWarning) + return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) except ImportError: - warnings.warn("SQLAlchemy not installed, using legacy mode") - if flavor is None: - raise SQLAlchemyRequired - else: - return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) + if flavor == 'mysql': + warnings.warn(_MYSQL_WARNING, FutureWarning) + return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) class PandasSQLTable(PandasObject): @@ -893,7 +888,7 @@ def _create_sql_schema(self, frame, table_name): } -_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed." +_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. 
" "In pandas versions < 0.14, spaces were converted to " "underscores.") @@ -991,6 +986,8 @@ class PandasSQLLegacy(PandasSQL): def __init__(self, con, flavor, is_cursor=False): self.is_cursor = is_cursor self.con = con + if flavor is None: + flavor = 'sqlite' if flavor not in ['sqlite', 'mysql']: raise NotImplementedError else: @@ -1098,6 +1095,8 @@ def get_schema(frame, name, flavor='sqlite', keys=None, con=None): """ if con is None: + if flavor == 'mysql': + warnings.warn(_MYSQL_WARNING, FutureWarning) return _get_schema_legacy(frame, name, flavor, keys) pandas_sql = pandasSQL_builder(con=con, flavor=flavor) diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 9a34e84c153a0..35acfc0ac8bf4 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -332,7 +332,7 @@ def setUp(self): def test_read_sql_iris(self): iris_frame = sql.read_sql_query( - "SELECT * FROM iris", self.conn, flavor='sqlite') + "SELECT * FROM iris", self.conn) self._check_iris_loaded_frame(iris_frame) def test_legacy_read_frame(self): @@ -391,8 +391,7 @@ def test_to_sql_append(self): def test_to_sql_series(self): s = Series(np.arange(5, dtype='int64'), name='series') sql.to_sql(s, "test_series", self.conn, flavor='sqlite', index=False) - s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn, - flavor='sqlite') + s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn) tm.assert_frame_equal(s.to_frame(), s2) def test_to_sql_panel(self): @@ -416,8 +415,7 @@ def test_roundtrip(self): con=self.conn, flavor='sqlite') result = sql.read_sql_query( 'SELECT * FROM test_frame_roundtrip', - con=self.conn, - flavor='sqlite') + con=self.conn) # HACK! 
result.index = self.test_frame1.index @@ -428,41 +426,38 @@ def test_roundtrip(self): def test_execute_sql(self): # drop_sql = "DROP TABLE IF EXISTS test" # should already be done - iris_results = sql.execute( - "SELECT * FROM iris", con=self.conn, flavor='sqlite') + iris_results = sql.execute("SELECT * FROM iris", con=self.conn) row = iris_results.fetchone() tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) def test_date_parsing(self): # Test date parsing in read_sq # No Parsing - df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, - flavor='sqlite') + df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn) self.assertFalse( issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, - flavor='sqlite', parse_dates=['DateCol']) + parse_dates=['DateCol']) self.assertTrue( issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, - flavor='sqlite', parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'}) self.assertTrue( issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, - flavor='sqlite', parse_dates=['IntDateCol']) + parse_dates=['IntDateCol']) self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, - flavor='sqlite', parse_dates={'IntDateCol': 's'}) + parse_dates={'IntDateCol': 's'}) self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") @@ -471,7 +466,7 @@ def test_date_and_index(self): # Test case where same column appears in parse_date and index_col df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, - flavor='sqlite', index_col='DateCol', + 
index_col='DateCol', parse_dates=['DateCol', 'IntDateCol']) self.assertTrue(issubclass(df.index.dtype.type, np.datetime64), @@ -651,22 +646,19 @@ def test_sql_open_close(self): conn = self.connect(name) result = sql.read_sql_query("SELECT * FROM test_frame2_legacy;", - conn, flavor="sqlite") + conn) conn.close() tm.assert_frame_equal(self.test_frame2, result) def test_read_sql_delegate(self): - iris_frame1 = sql.read_sql_query( - "SELECT * FROM iris", self.conn, flavor=self.flavor) - iris_frame2 = sql.read_sql( - "SELECT * FROM iris", self.conn, flavor=self.flavor) + iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn) + iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn) tm.assert_frame_equal(iris_frame1, iris_frame2, "read_sql and read_sql_query have not the same" " result with a query") - self.assertRaises(ValueError, sql.read_sql, 'iris', self.conn, - flavor=self.flavor) + self.assertRaises(ValueError, sql.read_sql, 'iris', self.conn) def test_safe_names_warning(self): # GH 6798 @@ -1109,6 +1101,14 @@ def tearDown(self): self.conn.commit() self.conn.close() + def test_a_deprecation(self): + with tm.assert_produces_warning(FutureWarning): + sql.to_sql(self.test_frame1, 'test_frame1', self.conn, + flavor='mysql') + self.assertTrue( + sql.has_table('test_frame1', self.conn, flavor='mysql'), + 'Table not written to DB') + #------------------------------------------------------------------------------ #--- Old tests from 0.13.1 (before refactor using sqlalchemy) @@ -1277,8 +1277,6 @@ def _check_roundtrip(self, frame): expected = frame.copy() expected.index = Index(lrange(len(frame2))) + 10 expected.index.name = 'Idx' - print(expected.index.names) - print(result.index.names) tm.assert_frame_equal(expected, result) def test_tquery(self):
- removed warning for not using sqlalchemy (as sqlite DBAPI connection is fully supported, no warning needed) - added deprecation warning for 'mysql' flavor - removed necessity of providing flavor kwarg (no warning if not provided, assumed to be sqlite3) - removed `flavor` kwarg from execute and read_sql_query (because a) are not database specific functions + b) would only be needed for mysql, but this will is deprecated so no need to introduce it) - updated the tests to reflect this - updated docs and docstrings to reflect that sqlite3 is the only but fully supported DBAPI connection (no 'legacy') Closes #6900.
https://api.github.com/repos/pandas-dev/pandas/pulls/7077
2014-05-08T14:45:34Z
2014-05-11T09:54:15Z
2014-05-11T09:54:15Z
2016-07-11T12:50:56Z
FIX sql handling of timedelta64 columns (GH6921)
diff --git a/doc/source/io.rst b/doc/source/io.rst index a0807088b2cf5..212b2debc98a5 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -809,8 +809,6 @@ two extra parameters: String value 'infer' can be used to instruct the parser to try detecting the column specifications from the first 100 rows of the data. Default behaviour, if not specified, is to infer. - As with regular python slices, you can slice to the end of the line - with ``None``, e.g. ``colspecs = [(0, 1), (1, None)]``. - ``widths``: A list of field widths which can be used instead of 'colspecs' if the intervals are contiguous. @@ -3235,6 +3233,13 @@ the database using :func:`~pandas.DataFrame.to_sql`. data.to_sql('data', engine) +.. note:: + + Due to the limited support for timedelta's in the different database + flavors, columns with type ``timedelta64`` will be written as integer + values as nanoseconds to the database and a warning will be raised. + + Reading Tables ~~~~~~~~~~~~~~ diff --git a/pandas/io/sql.py b/pandas/io/sql.py index c18a4aef5355b..d54e3a5f549ca 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -730,7 +730,10 @@ def _sqlalchemy_type(self, arr_or_dtype): except: return DateTime if com.is_timedelta64_dtype(arr_or_dtype): - return Interval + warnings.warn("the 'timedelta' type is not supported, and will be " + "written as integer values (ns frequency) to the " + "database.", UserWarning) + return Integer elif com.is_float_dtype(arr_or_dtype): return Float elif com.is_integer_dtype(arr_or_dtype): @@ -973,6 +976,11 @@ def _sql_type_name(self, dtype): pytype_name = "text" if issubclass(pytype, np.floating): pytype_name = "float" + elif com.is_timedelta64_dtype(pytype): + warnings.warn("the 'timedelta' type is not supported, and will be " + "written as integer values (ns frequency) to the " + "database.", UserWarning) + pytype_name = "int" elif issubclass(pytype, np.integer): pytype_name = "int" elif issubclass(pytype, np.datetime64) or pytype is datetime: diff --git 
a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 9a34e84c153a0..8b1de89be7acb 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -29,12 +29,14 @@ from datetime import datetime from pandas import DataFrame, Series, Index, MultiIndex, isnull +from pandas import to_timedelta import pandas.compat as compat from pandas.compat import StringIO, range, lrange from pandas.core.datetools import format as date_format import pandas.io.sql as sql import pandas.util.testing as tm +from pandas import _np_version_under1p7 try: @@ -480,6 +482,17 @@ def test_date_and_index(self): self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") + def test_timedelta(self): + # see #6921 + if _np_version_under1p7: + raise nose.SkipTest("test only valid in numpy >= 1.7") + + df = to_timedelta(Series(['00:00:01', '00:00:03'], name='foo')).to_frame() + with tm.assert_produces_warning(UserWarning): + df.to_sql('test_timedelta', self.conn) + result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn) + tm.assert_series_equal(result['foo'], df['foo'].astype('int64')) + def test_to_sql_index_label(self): temp_frame = DataFrame({'col1': range(4)})
Closes #6921 Converting to an int was actually also how it was now with sqlite DBAPI fallback (as timedelta64 is subclass of integer), so this solution is also backwards compatible and consistent between sqlalchemy and fallback mode.
https://api.github.com/repos/pandas-dev/pandas/pulls/7076
2014-05-08T14:43:56Z
2014-05-11T09:55:01Z
2014-05-11T09:55:01Z
2014-09-06T09:40:27Z
Fix Xl(sx|wt)Writer always using date_format even if a datetime is supplied.
diff --git a/doc/source/release.rst b/doc/source/release.rst index 51a4b52953287..1d48674727d51 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -478,6 +478,7 @@ Bug Fixes - Bug in ``Float64Index.isin()`` where containing ``nan`` s would make indices claim that they contained all the things (:issue:`7066`). - Bug in ``DataFrame.boxplot`` where it failed to use the axis passed as the ``ax`` argument (:issue:`3578`) +- Bug in the ``XlsxWriter`` and ``XlwtWriter`` implementations that resulted in datetime columns being formatted without the time (:issue:`7075`) pandas 0.13.1 ------------- diff --git a/pandas/io/excel.py b/pandas/io/excel.py index f4f40c8be7855..e81c279c5820d 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -661,7 +661,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): num_format_str = None if isinstance(cell.val, datetime.datetime): num_format_str = self.datetime_format - if isinstance(cell.val, datetime.date): + elif isinstance(cell.val, datetime.date): num_format_str = self.date_format stylekey = json.dumps(cell.style) @@ -782,7 +782,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): num_format_str = None if isinstance(cell.val, datetime.datetime): num_format_str = self.datetime_format - if isinstance(cell.val, datetime.date): + elif isinstance(cell.val, datetime.date): num_format_str = self.date_format stylekey = json.dumps(cell.style)
In the `XlsxWriter` and `XlwtWriter` implementations there is a minor bug resulting in the `datetime_format` being discarded (i.e. https://github.com/pydata/pandas/blob/master/pandas/io/excel.py#L662): ``` if isinstance(cell.val, datetime.datetime): num_format_str = self.datetime_format if isinstance(cell.val, datetime.date): num_format_str = self.date_format ``` Since `datetime.datetime` derives from `datetime.date` the second `if`-clause in the will always trigger, leading to the `num_format_str` being overwritten by the `date_format`.
https://api.github.com/repos/pandas-dev/pandas/pulls/7075
2014-05-08T08:57:46Z
2014-05-08T13:38:37Z
2014-05-08T13:38:37Z
2015-01-06T14:44:42Z
BUG: PEBKAC bug in Float64Index
diff --git a/doc/source/release.rst b/doc/source/release.rst index b5a11091779ec..728dddbe8b979 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -475,6 +475,8 @@ Bug Fixes caused possible color/class mismatch (:issue:`6956`) - Bug in ``radviz`` and ``andrews_curves`` where multiple values of 'color' were being passed to plotting method (:issue:`6956`) +- Bug in ``Float64Index.isin()`` where containing ``nan`` s would make indices + claim that they contained all the things (:issue:`7066`). pandas 0.13.1 ------------- diff --git a/pandas/core/index.py b/pandas/core/index.py index ff6ee79bf24e4..c3619b992028d 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2096,7 +2096,8 @@ def isin(self, values): """ value_set = set(values) return lib.ismember_nans(self._array_values(), value_set, - self._hasnans) + isnull(list(value_set)).any()) + class MultiIndex(Index): diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 206c85124e122..dafbfd07ca51d 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -913,6 +913,25 @@ def test_contains_not_nans(self): i = Float64Index([1.0, 2.0, np.nan]) self.assertTrue(1.0 in i) + def test_doesnt_contain_all_the_things(self): + i = Float64Index([np.nan]) + self.assertFalse(i.isin([0]).item()) + self.assertFalse(i.isin([1]).item()) + self.assertTrue(i.isin([np.nan]).item()) + + def test_nan_multiple_containment(self): + i = Float64Index([1.0, np.nan]) + np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False])) + np.testing.assert_array_equal(i.isin([2.0, np.pi]), + np.array([False, False])) + np.testing.assert_array_equal(i.isin([np.nan]), + np.array([False, True])) + np.testing.assert_array_equal(i.isin([1.0, np.nan]), + np.array([True, True])) + i = Float64Index([1.0, 2.0]) + np.testing.assert_array_equal(i.isin([np.nan]), + np.array([False, False])) + class TestInt64Index(tm.TestCase): _multiprocess_can_split_ = True
closes #7066
https://api.github.com/repos/pandas-dev/pandas/pulls/7068
2014-05-07T22:09:43Z
2014-05-08T12:39:13Z
2014-05-08T12:39:13Z
2014-07-16T09:04:53Z
Added dropna to docstring for HDFStore.put()
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 95daa2bbc2752..9fabf0ae960fe 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -809,6 +809,8 @@ def put(self, key, value, format=None, append=False, **kwargs): This will force Table format, append the input data to the existing. encoding : default None, provide an encoding for strings + dropna : boolean, default True, do not write an ALL nan row to + the store settable by the option 'io.hdf.dropna_table' """ if format is None: format = get_option("io.hdf.default_format") or 'fixed'
HDFStore.put() method accepts dropna boolean parameter as it is passed directly to the internal _write_to_group() method, but it was not explicitly documented (as it is for append). Also settable via option. Just added 2 lines to the put() method docstring to mention it explicitly - copied from append()
https://api.github.com/repos/pandas-dev/pandas/pulls/7064
2014-05-07T19:29:10Z
2014-05-08T14:25:49Z
2014-05-08T14:25:49Z
2014-07-16T09:04:52Z
DOC: One more cleanup on visualization.rst
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index d653cb53d6d5b..e5eee45894266 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -118,11 +118,12 @@ The ``kind`` keyword argument of :meth:`~DataFrame.plot` accepts a handful of values for plots other than the default Line plot. These include: -* :ref:`'bar' <visualization.barplot>` or ``'barh'`` for bar plots, -* :ref:`'kde' <visualization.kde>` or ``'density'`` for density plots, -* :ref:`'area' <visualization.area_plot>` for area plots, -* :ref:`'scatter' <visualization.scatter_matrix>` for scatter plots, and -* :ref:`'hexbin' <visualization.hexbin>` for hexagonal bin plots. +* :ref:`'bar' <visualization.barplot>` or :ref:`'barh' <visualization.barplot>` for bar plots +* :ref:`'kde' <visualization.kde>` or ``'density'`` for density plots +* :ref:`'area' <visualization.area_plot>` for area plots +* :ref:`'scatter' <visualization.scatter_matrix>` for scatter plots +* :ref:`'hexbin' <visualization.hexbin>` for hexagonal bin plots +* :ref:`'pie' <visualization.pie>` for pie plots In addition to these ``kind`` s, there are the :ref:`DataFrame.hist() <visualization.hist>`, and :ref:`DataFrame.boxplot() <visualization.box>` methods, which use a separate interface. @@ -132,12 +133,12 @@ that take a :class:`Series` or :class:`DataFrame` as an argument. These include * :ref:`Scatter Matrix <visualization.scatter_matrix>` -* :ref:`Andrews Curves <visualization.andrews_curves>`, -* :ref:`Parallel Coordinates <visualization.parallel_coordinates>`, -* :ref:`Lag Plot <visualization.lag>`, -* :ref:`Autocorrelation Plot <visualization.autocorrelation>`, -* :ref:`Bootstrap Plot <visualization.bootstrap>`, and -* :ref:`RadViz <visualization.radviz>`. 
+* :ref:`Andrews Curves <visualization.andrews_curves>` +* :ref:`Parallel Coordinates <visualization.parallel_coordinates>` +* :ref:`Lag Plot <visualization.lag>` +* :ref:`Autocorrelation Plot <visualization.autocorrelation>` +* :ref:`Bootstrap Plot <visualization.bootstrap>` +* :ref:`RadViz <visualization.radviz>` Plots may also be adorned with :ref:`errorbars <visualization.errorbars>` or :ref:`tables <visualization.table>`. @@ -713,8 +714,8 @@ layout and formatting of the returned plot: For each kind of plot (e.g. `line`, `bar`, `scatter`) any additional arguments keywords are passed alogn to the corresponding matplotlib function (:meth:`ax.plot() <matplotlib.axes.Axes.plot>`, -:meth: `ax.bar() <matplotlib.axes.Axes.bar>`, -:meth: `ax.scatter() <matplotlib.axes.Axes.scatter>`). These can be used +:meth:`ax.bar() <matplotlib.axes.Axes.bar>`, +:meth:`ax.scatter() <matplotlib.axes.Axes.scatter>`). These can be used to control additional styling, beyond what pandas provides. Controlling the Legend
Followup to https://github.com/pydata/pandas/pull/7054 DOC: fix formatting on plot formattting DOC: add KDE to other plots more bits of formatting Thanks for catching these
https://api.github.com/repos/pandas-dev/pandas/pulls/7062
2014-05-07T12:44:27Z
2014-05-07T13:14:48Z
2014-05-07T13:14:48Z
2016-11-03T12:37:55Z
ENH: use size instead of cythonized count for fallback cases
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 400f7e06df784..2c7f6c5e181da 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -722,8 +722,7 @@ def size(self): last = _groupby_function('last', 'last', _last_compat, numeric_only=False, _convert=True) - _count = _groupby_function('_count', 'count', - lambda x, axis=0: notnull(x).sum(axis=axis), + _count = _groupby_function('_count', 'count', lambda x, axis=0: x.size(), numeric_only=False) def count(self, axis=0): diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py index 53754a899adf8..b432ddd03d17f 100644 --- a/pandas/src/generate_code.py +++ b/pandas/src/generate_code.py @@ -2219,18 +2219,21 @@ def put2d_%(name)s_%(dest_type)s(ndarray[%(c_type)s, ndim=2, cast=True] values, #------------------------------------------------------------------------- # Generators -def generate_put_template(template, use_ints = True, use_floats = True, - use_objects=False): +def generate_put_template(template, use_ints=True, use_floats=True, + use_objects=False, use_datelikes=False): floats_list = [ ('float64', 'float64_t', 'float64_t', 'np.float64'), ('float32', 'float32_t', 'float32_t', 'np.float32'), - ] + ] ints_list = [ ('int8', 'int8_t', 'float32_t', 'np.float32'), ('int16', 'int16_t', 'float32_t', 'np.float32'), ('int32', 'int32_t', 'float64_t', 'np.float64'), ('int64', 'int64_t', 'float64_t', 'np.float64'), - ] + ] + date_like_list = [ + ('int64', 'int64_t', 'float64_t', 'np.float64'), + ] object_list = [('object', 'object', 'float64_t', 'np.float64')] function_list = [] if use_floats: @@ -2239,14 +2242,16 @@ def generate_put_template(template, use_ints = True, use_floats = True, function_list.extend(ints_list) if use_objects: function_list.extend(object_list) + if use_datelikes: + function_list.extend(date_like_list) output = StringIO() for name, c_type, dest_type, dest_dtype in function_list: - func = template % {'name' : name, - 'c_type' : c_type, - 'dest_type' : 
dest_type.replace('_t', ''), - 'dest_type2' : dest_type, - 'dest_dtype' : dest_dtype} + func = template % {'name': name, + 'c_type': c_type, + 'dest_type': dest_type.replace('_t', ''), + 'dest_type2': dest_type, + 'dest_dtype': dest_dtype} output.write(func) return output.getvalue() @@ -2372,7 +2377,9 @@ def generate_take_cython_file(path='generated.pyx'): print(generate_put_template(template, use_ints=False), file=f) for template in groupby_count: - print(generate_put_template(template, use_objects=True), file=f) + print(generate_put_template(template, use_ints=False, + use_datelikes=True, use_objects=True), + file=f) # for template in templates_1d_datetime: # print >> f, generate_from_template_datetime(template) diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx index 26c6f3daf0e0a..42ae043847ba1 100644 --- a/pandas/src/generated.pyx +++ b/pandas/src/generated.pyx @@ -6697,81 +6697,9 @@ def group_count_float32(ndarray[float32_t, ndim=2] out, @cython.boundscheck(False) @cython.wraparound(False) -def group_count_int8(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int8_t, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, lab - Py_ssize_t N = values.shape[0], K = values.shape[1] - int8_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(values) != len(labels): - raise AssertionError("len(index) != len(labels)") - - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[lab, j] += val == val and val != iNaT - - for i in range(len(counts)): - for j in range(K): - out[i, j] = nobs[i, j] - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_int16(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int16_t, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - 
cdef: - Py_ssize_t i, j, lab - Py_ssize_t N = values.shape[0], K = values.shape[1] - int16_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(values) != len(labels): - raise AssertionError("len(index) != len(labels)") - - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[lab, j] += val == val and val != iNaT - - for i in range(len(counts)): - for j in range(K): - out[i, j] = nobs[i, j] - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_int32(ndarray[float64_t, ndim=2] out, +def group_count_object(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, - ndarray[int32_t, ndim=2] values, + ndarray[object, ndim=2] values, ndarray[int64_t] labels): ''' Only aggregates on axis=0 @@ -6779,7 +6707,7 @@ def group_count_int32(ndarray[float64_t, ndim=2] out, cdef: Py_ssize_t i, j, lab Py_ssize_t N = values.shape[0], K = values.shape[1] - int32_t val + object val ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), dtype=np.int64) @@ -6839,42 +6767,6 @@ def group_count_int64(ndarray[float64_t, ndim=2] out, out[i, j] = nobs[i, j] -@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_object(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[object, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, lab - Py_ssize_t N = values.shape[0], K = values.shape[1] - object val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(values) != len(labels): - raise AssertionError("len(index) != len(labels)") - - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[lab, j] += val == val and val != iNaT - - for i in range(len(counts)): - for j in range(K): - out[i, 
j] = nobs[i, j] - - @cython.boundscheck(False) @cython.wraparound(False) @@ -6946,77 +6838,9 @@ def group_count_bin_float32(ndarray[float32_t, ndim=2] out, @cython.boundscheck(False) @cython.wraparound(False) -def group_count_bin_int8(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int8_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, ngroups - Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 - int8_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - ngroups = len(bins) + (bins[len(bins) - 1] != N) - - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[b, j] += val == val and val != iNaT - - for i in range(ngroups): - for j in range(K): - out[i, j] = nobs[i, j] - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_bin_int16(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int16_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, ngroups - Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 - int16_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - ngroups = len(bins) + (bins[len(bins) - 1] != N) - - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[b, j] += val == val and val != iNaT - - for i in range(ngroups): - for j in range(K): - out[i, j] = nobs[i, j] - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_bin_int32(ndarray[float64_t, ndim=2] out, +def group_count_bin_object(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, - ndarray[int32_t, ndim=2] values, + ndarray[object, ndim=2] values, ndarray[int64_t] bins): ''' Only aggregates on 
axis=0 @@ -7024,7 +6848,7 @@ def group_count_bin_int32(ndarray[float64_t, ndim=2] out, cdef: Py_ssize_t i, j, ngroups Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 - int32_t val + object val ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), dtype=np.int64) @@ -7080,40 +6904,6 @@ def group_count_bin_int64(ndarray[float64_t, ndim=2] out, out[i, j] = nobs[i, j] -@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_bin_object(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[object, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, ngroups - Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 - object val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - ngroups = len(bins) + (bins[len(bins) - 1] != N) - - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[b, j] += val == val and val != iNaT - - for i in range(ngroups): - for j in range(K): - out[i, j] = nobs[i, j] - - @cython.wraparound(False) @cython.boundscheck(False) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index eb3c28b672fd4..107bc46da49fa 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -4202,6 +4202,19 @@ def test_datetime_count(self): name='dates') tm.assert_series_equal(result, expected) + def test_lower_int_prec_count(self): + df = DataFrame({'a': np.array([0, 1, 2, 100], np.int8), + 'b': np.array([1, 2, 3, 6], np.uint32), + 'c': np.array([4, 5, 6, 8], np.int16), + 'grp': list('ab' * 2)}) + result = df.groupby('grp').count() + expected = DataFrame({'a': [2, 2], + 'b': [2, 2], + 'c': [2, 2]}, index=pd.Index(list('ab'), + name='grp')) + tm.assert_frame_equal(result, expected) + + def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all() diff --git 
a/vb_suite/groupby.py b/vb_suite/groupby.py index 638862ffd1367..6f2132ff9b154 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -133,7 +133,7 @@ def f(): value2 = np.random.randn(n) value2[np.random.rand(n) > 0.5] = np.nan -obj = pd.util.testing.choice(['a', 'b'], size=n).astype(object) +obj = tm.choice(list('ab'), size=n).astype(object) obj[np.random.randn(n) > 0.5] = np.nan df = DataFrame({'key1': np.random.randint(0, 500, size=n), @@ -141,6 +141,7 @@ def f(): 'dates': dates, 'value2' : value2, 'value3' : np.random.randn(n), + 'ints': np.random.randint(0, 1000, size=n), 'obj': obj, 'offsets': offsets}) """ @@ -148,6 +149,19 @@ def f(): groupby_multi_count = Benchmark("df.groupby(['key1', 'key2']).count()", setup, name='groupby_multi_count', start_date=datetime(2014, 5, 5)) + +setup = common_setup + """ +n = 10000 + +df = DataFrame({'key1': randint(0, 500, size=n), + 'key2': randint(0, 100, size=n), + 'ints': randint(0, 1000, size=n), + 'ints2': randint(0, 1000, size=n)}) +""" + +groupby_int_count = Benchmark("df.groupby(['key1', 'key2']).count()", + setup, name='groupby_int_count', + start_date=datetime(2014, 5, 6)) #---------------------------------------------------------------------- # Series.value_counts
- use size instead of cythonized count for integer case since we cannot have `nan` in that case - not faster perf wise - compilation time is shorter because count has no int templates except for dates and times - any precision issues (don't think there were any) with other integer types are gone, since only `int64` is compared with dates, whereas lower precision integers use `size`. Anywho, here's the vbench results for this PR vs master: ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- groupby_int_count | 4.0247 | 4.0337 | 0.9978 | groupby_multi_count | 7.5080 | 7.4813 | 1.0036 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7055
2014-05-06T14:02:58Z
2014-05-08T17:55:21Z
2014-05-08T17:55:21Z
2014-07-16T09:04:47Z
DOC: cleanup vis docs
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index fbc0a9005d50d..d653cb53d6d5b 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -17,15 +17,6 @@ options.display.max_rows = 15 from pandas.compat import lrange -.. note:: - - All calls to ``np.random`` are seeded with 123456. - -.. note:: - - See :ref:`ecosystem <ecosystem.visualization>` for visualization libraries - that go beyond the basics included in pandas. - ******** Plotting ******** @@ -47,6 +38,14 @@ All the plots in the documentation are rendered with this option set to the pd.options.display.mpl_style = 'default' +We provide the basics in pandas to easily create decent looking plots. +See the :ref:`ecosystem <ecosystem.visualization>` section for visualization +libraries that go beyond the basics documented here. + +.. note:: + + All calls to ``np.random`` are seeded with 123456. + .. _visualization.basic: Basic Plotting: ``plot`` @@ -361,7 +360,7 @@ too dense to plot each point individually. .. ipython:: python df = DataFrame(randn(1000, 2), columns=['a', 'b']) - df['b'] = df['b'] = df['b'] + np.arange(1000) + df['b'] = df['b'] + np.arange(1000) @savefig hexbin_plot.png df.plot(kind='hexbin', x='a', y='b', gridsize=25) @@ -513,6 +512,9 @@ You can create a scatter plot matrix using the .. _visualization.kde: +Density Plot +~~~~~~~~~~~~ + .. versionadded:: 0.8.0 You can create density plots using the Series/DataFrame.plot and
DOC: Give KDE a section header DOC: remove extra slice in hexbin example DOC: move notes to look nicer A few changes that came up in https://github.com/pydata/pandas/pull/7027
https://api.github.com/repos/pandas-dev/pandas/pulls/7054
2014-05-06T13:39:00Z
2014-05-06T14:22:31Z
2014-05-06T14:22:31Z
2017-04-05T02:08:36Z
DOC: Small typo
diff --git a/doc/source/io.rst b/doc/source/io.rst index 249cfaf62878f..1aa6dde2c08b4 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2875,7 +2875,7 @@ again **WILL TEND TO INCREASE THE FILE SIZE**. To *clean* the file, use Compression ~~~~~~~~~~~ -``PyTables`` allows the stored data to be compressed. Tthis applies to +``PyTables`` allows the stored data to be compressed. This applies to all kinds of stores, not just tables. - Pass ``complevel=int`` for a compression level (1-9, with 0 being no
https://api.github.com/repos/pandas-dev/pandas/pulls/7047
2014-05-05T20:29:06Z
2014-05-05T21:07:18Z
2014-05-05T21:07:18Z
2014-07-16T09:04:45Z
DOC: Add parameter to docstring
diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py index 99fa1eaba79cc..c2512ba2b4b38 100644 --- a/pandas/tools/tile.py +++ b/pandas/tools/tile.py @@ -38,6 +38,10 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, retbins : bool, optional Whether to return the bins or not. Can be useful if bins is given as a scalar. + precision : int + The precision at which to store and display the bins labels + include_lowest : bool + Whether the first interval should be left-inclusive or not. Returns ------- @@ -121,6 +125,8 @@ def qcut(x, q, labels=None, retbins=False, precision=3): retbins : bool, optional Whether to return the bins or not. Can be useful if bins is given as a scalar. + precision : int + The precision at which to store and display the bins labels Returns -------
I just kinda guessed what `precision` does.
https://api.github.com/repos/pandas-dev/pandas/pulls/7046
2014-05-05T19:54:13Z
2014-05-05T21:04:26Z
2014-05-05T21:04:26Z
2014-06-28T15:35:13Z
API: update nth to use the _set_selection_from_grouper makes first==nth(0) and last==nth(-1)
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 4bdc69be777ba..bbb5060acc35d 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -397,7 +397,7 @@ index are the group names and whose values are the sizes of each group. named *columns*. Aggregating functions are ones that reduce the dimension of the returned objects, - for example: ``mean, sum, size, count, std, var, describe, first, last, min, max``. This is + for example: ``mean, sum, size, count, std, var, describe, first, last, nth, min, max``. This is what happens when you do for example ``DataFrame.sum()`` and get back a ``Series``. .. _groupby.aggregate.multifunc: @@ -613,7 +613,7 @@ For dataframes with multiple columns, filters should explicitly specify a column a reduced shape of the original (and potentitally eliminating groups), but with the index unchanged. Passing ``as_index=False`` will not affect these transformation methods. - For example: ``head, tail nth``. + For example: ``head, tail``. .. ipython:: python diff --git a/doc/source/release.rst b/doc/source/release.rst index 463cf928660dd..05c6f2276c2de 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -190,7 +190,7 @@ API Changes validation warnings in :func:`read_csv`/:func:`read_table` (:issue:`6607`) - Raise a ``TypeError`` when ``DataFrame`` is passed an iterator as the ``data`` argument (:issue:`5357`) -- groupby will now not return the grouped column for non-cython functions (:issue:`5610`, :issue:`5614`), +- groupby will now not return the grouped column for non-cython functions (:issue:`5610`, :issue:`5614`, :issue:`6732`), as its already the index - ``DataFrame.plot`` and ``Series.plot`` now supports area plot with specifying ``kind='area'`` (:issue:`6656`) - Line plot can be stacked by ``stacked=True``. 
(:issue:`6656`) diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index c706312815e37..bf15812e91f8e 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -22,6 +22,8 @@ users upgrade to this version. - :ref:`API Changes <whatsnew_0140.api>` +- :ref:`Groupby API Changes <whatsnew_0140.groupby>` + - :ref:`Performance Improvements <whatsnew_0140.performance>` - :ref:`Prior Deprecations <whatsnew_0140.prior_deprecations>` @@ -95,57 +97,6 @@ API changes - Add ``is_month_start``, ``is_month_end``, ``is_quarter_start``, ``is_quarter_end``, ``is_year_start``, ``is_year_end`` accessors for ``DateTimeIndex`` / ``Timestamp`` which return a boolean array of whether the timestamp(s) are at the start/end of the month/quarter/year defined by the frequency of the ``DateTimeIndex`` / ``Timestamp`` (:issue:`4565`, :issue:`6998`) -- More consistent behaviour for some groupby methods: - - groupby ``head`` and ``tail`` now act more like ``filter`` rather than an aggregation: - - .. ipython:: python - - df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B']) - g = df.groupby('A') - g.head(1) # filters DataFrame - - g.apply(lambda x: x.head(1)) # used to simply fall-through - - groupby head and tail respect column selection: - - .. ipython:: python - - g[['B']].head(1) - - groupby ``nth`` now filters by default, with optional dropna argument to ignore - NaN (to replicate the previous behaviour.), See :ref:`the docs <groupby.nth>`. - - .. ipython:: python - - df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) - g = df.groupby('A') - g.nth(0) # can also use negative ints - - g.nth(0, dropna='any') # similar to old behaviour - - groupby will now not return the grouped column for non-cython functions (:issue:`5610`, :issue:`5614`), - as its already the index - - .. 
ipython:: python - - df = DataFrame([[1, np.nan], [1, 4], [5, 6], [5, 8]], columns=['A', 'B']) - g = df.groupby('A') - g.count() - g.describe() - - passing ``as_index`` will leave the grouped column in-place (this is not change in 0.14.0) - - .. ipython:: python - - df = DataFrame([[1, np.nan], [1, 4], [5, 6], [5, 8]], columns=['A', 'B']) - g = df.groupby('A',as_index=False) - g.count() - g.describe() - -- Allow specification of a more complex groupby via ``pd.Grouper``, such as grouping - by a Time and a string field simultaneously. See :ref:`the docs <groupby.specify>`. (:issue:`3794`) - - Local variable usage has changed in :func:`pandas.eval`/:meth:`DataFrame.eval`/:meth:`DataFrame.query` (:issue:`5987`). For the :class:`~pandas.DataFrame` methods, two things have @@ -247,6 +198,62 @@ API changes from 0.13.1 - Added ``factorize`` functions to ``Index`` and ``Series`` to get indexer and unique values (:issue:`7090`) +.. _whatsnew_0140.groupby: + +Groupby API Changes +~~~~~~~~~~~~~~~~~~~ + +More consistent behaviour for some groupby methods: + +- groupby ``head`` and ``tail`` now act more like ``filter`` rather than an aggregation: + + .. ipython:: python + + df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B']) + g = df.groupby('A') + g.head(1) # filters DataFrame + + g.apply(lambda x: x.head(1)) # used to simply fall-through + +- groupby head and tail respect column selection: + + .. ipython:: python + + g[['B']].head(1) + +- groupby ``nth`` now filters by default, with optional dropna argument to ignore + NaN (to replicate the previous behaviour.), See :ref:`the docs <groupby.nth>`. + + .. ipython:: python + + df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) + g = df.groupby('A') + g.nth(0) # can also use negative ints + + g.nth(0, dropna='any') # similar to old behaviour + +- groupby will now not return the grouped column for non-cython functions (:issue:`5610`, :issue:`5614`, :issue:`6732`), + as its already the index + + .. 
ipython:: python + + df = DataFrame([[1, np.nan], [1, 4], [5, 6], [5, 8]], columns=['A', 'B']) + g = df.groupby('A') + g.count() + g.describe() + +- passing ``as_index`` will leave the grouped column in-place (this is not change in 0.14.0) + + .. ipython:: python + + df = DataFrame([[1, np.nan], [1, 4], [5, 6], [5, 8]], columns=['A', 'B']) + g = df.groupby('A',as_index=False) + g.count() + g.describe() + +- Allow specification of a more complex groupby via ``pd.Grouper``, such as grouping + by a Time and a string field simultaneously. See :ref:`the docs <groupby.specify>`. (:issue:`3794`) + .. _whatsnew_0140.sql: SQL diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index b13b2121ac0c4..bce3a993171a7 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -99,6 +99,7 @@ class SpecificationError(GroupByError): def _groupby_function(name, alias, npfunc, numeric_only=True, _convert=False): def f(self): + self._set_selection_from_grouper() try: return self._cython_agg_general(alias, numeric_only=numeric_only) except AssertionError as e: @@ -356,6 +357,7 @@ class GroupBy(PandasObject): _apply_whitelist = _common_apply_whitelist _internal_names = ['_cache'] _internal_names_set = set(_internal_names) + _group_selection = None def __init__(self, obj, keys=None, axis=0, level=None, grouper=None, exclusions=None, selection=None, as_index=True, @@ -454,6 +456,8 @@ def _selection_list(self): def _selected_obj(self): if self._selection is None or isinstance(self.obj, Series): + if self._group_selection is not None: + return self.obj[self._group_selection] return self.obj else: return self.obj[self._selection] @@ -461,11 +465,11 @@ def _selected_obj(self): def _set_selection_from_grouper(self): """ we may need create a selection if we have non-level groupers """ grp = self.grouper - if self._selection is None and self.as_index and getattr(grp,'groupings',None) is not None: + if self.as_index and getattr(grp,'groupings',None) is not None: ax = 
self.obj._info_axis groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ] if len(groupers): - self._selection = (ax-Index(groupers)).tolist() + self._group_selection = (ax-Index(groupers)).tolist() def _local_dir(self): return sorted(set(self.obj._local_dir() + list(self._apply_whitelist))) @@ -776,6 +780,7 @@ def nth(self, n, dropna=None): """ + self._set_selection_from_grouper() if not dropna: # good choice m = self.grouper._max_groupsize if n >= m or n < -m: @@ -787,7 +792,21 @@ def nth(self, n, dropna=None): else: rng[- n - 1] = True is_nth = self._cumcount_array(rng, ascending=False) - return self._selected_obj[is_nth] + + result = self._selected_obj[is_nth] + + # the result index + if self.as_index: + ax = self.obj._info_axis + names = self.grouper.names + if all([ n in ax for n in names ]): + result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names) + elif self._group_selection is not None: + result.index = self.obj._get_axis(self.axis)[is_nth] + + result = result.sort_index() + + return result if (isinstance(self._selected_obj, DataFrame) and dropna not in ['any', 'all']): @@ -853,6 +872,7 @@ def cumcount(self, **kwargs): dtype: int64 """ + self._set_selection_from_grouper() ascending = kwargs.pop('ascending', True) index = self._selected_obj.index diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index e5d8b92f7094f..7a8fc8a3832db 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -166,18 +166,27 @@ def test_first_last_nth(self): # tests for first / last / nth grouped = self.df.groupby('A') first = grouped.first() - expected = self.df.ix[[1, 0], ['B', 'C', 'D']] - expected.index = ['bar', 'foo'] - assert_frame_equal(first, expected, check_names=False) + expected = self.df.ix[[1, 0], ['B','C','D']] + expected.index = Index(['bar', 'foo'],name='A') + expected = expected.sort_index() + assert_frame_equal(first, expected) + + nth = 
grouped.nth(0) + assert_frame_equal(nth, expected) last = grouped.last() - expected = self.df.ix[[5, 7], ['B', 'C', 'D']] - expected.index = ['bar', 'foo'] - assert_frame_equal(last, expected, check_names=False) + expected = self.df.ix[[5, 7], ['B','C','D']] + expected.index = Index(['bar', 'foo'],name='A') + assert_frame_equal(last, expected) + + nth = grouped.nth(-1) + assert_frame_equal(nth, expected) nth = grouped.nth(1) - expected = self.df.iloc[[2, 3]] - assert_frame_equal(nth, expected, check_names=False) + expected = self.df.ix[[2, 3],['B','C','D']].copy() + expected.index = Index(['foo', 'bar'],name='A') + expected = expected.sort_index() + assert_frame_equal(nth, expected) # it works! grouped['B'].first() @@ -189,6 +198,17 @@ def test_first_last_nth(self): self.assert_(com.isnull(grouped['B'].last()['foo'])) self.assert_(com.isnull(grouped['B'].nth(0)[0])) # not sure what this is testing + # v0.14.0 whatsnew + df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) + g = df.groupby('A') + result = g.first() + expected = df.iloc[[1,2]].set_index('A') + assert_frame_equal(result, expected) + + expected = df.iloc[[1,2]].set_index('A') + result = g.nth(0,dropna='any') + assert_frame_equal(result, expected) + def test_first_last_nth_dtypes(self): df = self.df_mixed_floats.copy() @@ -199,17 +219,21 @@ def test_first_last_nth_dtypes(self): grouped = df.groupby('A') first = grouped.first() expected = df.ix[[1, 0], ['B', 'C', 'D', 'E', 'F']] - expected.index = ['bar', 'foo'] - assert_frame_equal(first, expected, check_names=False) + expected.index = Index(['bar', 'foo'], name='A') + expected = expected.sort_index() + assert_frame_equal(first, expected) last = grouped.last() expected = df.ix[[5, 7], ['B', 'C', 'D', 'E', 'F']] - expected.index = ['bar', 'foo'] - assert_frame_equal(last, expected, check_names=False) + expected.index = Index(['bar', 'foo'], name='A') + expected = expected.sort_index() + assert_frame_equal(last, expected) nth = grouped.nth(1) 
- expected = df.iloc[[2, 3]] - assert_frame_equal(nth, expected, check_names=False) + expected = df.ix[[3, 2],['B', 'C', 'D', 'E', 'F']] + expected.index = Index(['bar', 'foo'], name='A') + expected = expected.sort_index() + assert_frame_equal(nth, expected) # GH 2763, first/last shifting dtypes idx = lrange(10) @@ -223,15 +247,15 @@ def test_nth(self): df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) g = df.groupby('A') - assert_frame_equal(g.nth(0), df.iloc[[0, 2]]) - assert_frame_equal(g.nth(1), df.iloc[[1]]) - assert_frame_equal(g.nth(2), df.loc[[]]) - assert_frame_equal(g.nth(-1), df.iloc[[1, 2]]) - assert_frame_equal(g.nth(-2), df.iloc[[0]]) - assert_frame_equal(g.nth(-3), df.loc[[]]) + assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A')) + assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A')) + assert_frame_equal(g.nth(2), df.loc[[],['B']]) + assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A')) + assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A')) + assert_frame_equal(g.nth(-3), df.loc[[],['B']]) assert_series_equal(g.B.nth(0), df.B.iloc[[0, 2]]) assert_series_equal(g.B.nth(1), df.B.iloc[[1]]) - assert_frame_equal(g[['B']].nth(0), df.ix[[0, 2], ['B']]) + assert_frame_equal(g[['B']].nth(0), df.ix[[0, 2], ['A', 'B']].set_index('A')) exp = df.set_index('A') assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
closes #6732 `nth` sets the index appropriately and the same as first/last. this becomes less like head/tail in that `as_index` determines when you have an index here's the revised behavior: ``` In [1]: df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) In [3]: df Out[3]: A B 0 1 NaN 1 1 4 2 5 6 [3 rows x 2 columns] In [2]: g = df.groupby('A') In [9]: gni = df.groupby('A',as_index=False) ``` ``` In [5]: g.first() Out[5]: B A 1 4 5 6 [2 rows x 1 columns] # this was a regression from 0.13.1 (in that before this PR, this was returning like ``as_index=False``) In [6]: g.nth(0) Out[6]: B A 1 NaN 5 6 [2 rows x 1 columns] In [7]: g.nth(0,dropna='all') Out[7]: B A 1 4 5 6 [2 rows x 1 columns] ``` ``` In [10]: gni.nth(0) Out[10]: A B 0 1 NaN 2 5 6 [2 rows x 2 columns] In [11]: gni.nth(0,dropna='all') Out[11]: A B 0 1 4 1 5 6 [2 rows x 2 columns] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7044
2014-05-05T12:43:30Z
2014-05-12T12:41:17Z
2014-05-12T12:41:17Z
2014-07-16T09:04:40Z
ENH/BUG: partial string indexing with PeriodIndex
diff --git a/doc/source/release.rst b/doc/source/release.rst index 4886850b928b1..b12f4eca010d9 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -498,6 +498,7 @@ Bug Fixes - Bug in ``Dataframe.set_index``, ``reindex`` and ``pivot`` don't preserve ``DatetimeIndex`` and ``PeriodIndex`` attributes (:issue:`3950`, :issue:`5878`, :issue:`6631`) - Bug in ``MultiIndex.get_level_values`` doesn't preserve ``DatetimeIndex`` and ``PeriodIndex`` attributes (:issue:`7092`) - Bug in ``Groupby`` doesn't preserve ``tz`` (:issue:`3950`) +- Bug in ``PeriodIndex`` partial string slicing (:issue:`6716`) pandas 0.13.1 ------------- diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 1cae66fada587..65796d95fed0a 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -299,8 +299,10 @@ intelligent functionality like selection, slicing, etc. ts[:5].index ts[::2].index -Partial String Indexing -~~~~~~~~~~~~~~~~~~~~~~~ +.. _timeseries.partialindexing: + +DatetimeIndex Partial String Indexing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can pass in dates and strings that parse to dates as indexing parameters: @@ -1092,7 +1094,38 @@ objects: .. ipython:: python - Series(randn(len(prng)), prng) + ps = Series(randn(len(prng)), prng) + ps + +PeriodIndex Partial String Indexing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can pass in dates and strings to `Series` and `DataFrame` with `PeriodIndex`, as the same manner as `DatetimeIndex`. For details, refer to :ref:`DatetimeIndex Partial String Indexing <timeseries.partialindexing>`. + +.. ipython:: python + + ps['2011-01'] + + ps[datetime(2011, 12, 25):] + + ps['10/31/2011':'12/31/2011'] + +Passing string represents lower frequency than `PeriodIndex` returns partial sliced data. + +.. 
ipython:: python + + ps['2011'] + + dfp = DataFrame(randn(600,1), columns=['A'], + index=period_range('2013-01-01 9:00', periods=600, freq='T')) + dfp + dfp['2013-01-01 10H'] + +As the same as `DatetimeIndex`, the endpoints will be included in the result. Below example slices data starting from 10:00 to 11:59. + +.. ipython:: python + + dfp['2013-01-01 10H':'2013-01-01 11H'] Frequency Conversion and Resampling with PeriodIndex ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 49551c5bd3550..910c166e22ec5 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -591,6 +591,15 @@ Enhancements - str.wrap implemented (:issue:`6999`) +- `PeriodIndex` fully supports partial string indexing like `DatetimeIndex` (:issue:`7043`) + + .. ipython:: python + + prng = period_range('2013-01-01 09:00', periods=100, freq='H') + ps = Series(np.random.randn(len(prng)), index=prng) + ps + ps['2013-01-02'] + .. _whatsnew_0140.performance: Performance diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 113be28f86976..b70a7dafa28bd 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -908,13 +908,13 @@ def get_value(self, series, key): pos = np.searchsorted(self.values, [ord1, ord2]) key = slice(pos[0], pos[1] + 1) return series[key] - else: + elif grp == freqn: key = Period(asdt, freq=self.freq).ordinal return _maybe_box(self, self._engine.get_value(s, key), series, key) + else: + raise KeyError(key) except TypeError: pass - except KeyError: - pass key = Period(key, self.freq).ordinal return _maybe_box(self, self._engine.get_value(s, key), series, key) @@ -978,8 +978,10 @@ def _get_string_slice(self, key): raise ValueError('Partial indexing only valid for ' 'ordered time series') - asdt, parsed, reso = parse_time_string(key, self.freq) - key = asdt + key, parsed, reso = parse_time_string(key, self.freq) + + grp = _freq_mod._infer_period_group(reso) + freqn = 
_freq_mod._period_group(self.freq) if reso == 'year': t1 = Period(year=parsed.year, freq='A') @@ -988,6 +990,19 @@ def _get_string_slice(self, key): elif reso == 'quarter': q = (parsed.month - 1) // 3 + 1 t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC') + elif reso == 'day' and grp < freqn: + t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day, + freq='D') + elif reso == 'hour' and grp < freqn: + t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day, + hour=parsed.hour, freq='H') + elif reso == 'minute' and grp < freqn: + t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day, + hour=parsed.hour, minute=parsed.minute, freq='T') + elif reso == 'second' and grp < freqn: + t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day, + hour=parsed.hour, minute=parsed.minute, second=parsed.second, + freq='S') else: raise KeyError(key) diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 4117ca660db35..419ab48a01a07 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -1851,6 +1851,97 @@ def test_pindex_slice_index(self): exp = s[12:24] assert_series_equal(res, exp) + def test_getitem_day(self): + # GH 6716 + # Confirm DatetimeIndex and PeriodIndex works identically + didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400) + pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400) + + for idx in [didx, pidx]: + # getitem against index should raise ValueError + values = ['2014', '2013/02', '2013/01/02', + '2013/02/01 9H', '2013/02/01 09:00'] + for v in values: + with tm.assertRaises(ValueError): + idx[v] + + s = Series(np.random.rand(len(idx)), index=idx) + assert_series_equal(s['2013/01'], s[0:31]) + assert_series_equal(s['2013/02'], s[31:59]) + assert_series_equal(s['2014'], s[365:]) + + invalid = ['2013/02/01 9H', '2013/02/01 09:00'] + for v in invalid: + with tm.assertRaises(KeyError): + s[v] + + def test_range_slice_day(self): + # 
GH 6716 + didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400) + pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400) + + for idx in [didx, pidx]: + # slices against index should raise IndexError + values = ['2014', '2013/02', '2013/01/02', + '2013/02/01 9H', '2013/02/01 09:00'] + for v in values: + with tm.assertRaises(IndexError): + idx[v:] + + s = Series(np.random.rand(len(idx)), index=idx) + + assert_series_equal(s['2013/01/02':], s[1:]) + assert_series_equal(s['2013/01/02':'2013/01/05'], s[1:5]) + assert_series_equal(s['2013/02':], s[31:]) + assert_series_equal(s['2014':], s[365:]) + + invalid = ['2013/02/01 9H', '2013/02/01 09:00'] + for v in invalid: + with tm.assertRaises(IndexError): + idx[v:] + + def test_getitem_seconds(self): + # GH 6716 + didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000) + pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000) + + for idx in [didx, pidx]: + # getitem against index should raise ValueError + values = ['2014', '2013/02', '2013/01/02', + '2013/02/01 9H', '2013/02/01 09:00'] + for v in values: + with tm.assertRaises(ValueError): + idx[v] + + s = Series(np.random.rand(len(idx)), index=idx) + + assert_series_equal(s['2013/01/01 10:00'], s[3600:3660]) + assert_series_equal(s['2013/01/01 9H'], s[:3600]) + for d in ['2013/01/01', '2013/01', '2013']: + assert_series_equal(s[d], s) + + def test_range_slice_seconds(self): + # GH 6716 + didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000) + pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000) + + for idx in [didx, pidx]: + # slices against index should raise IndexError + values = ['2014', '2013/02', '2013/01/02', + '2013/02/01 9H', '2013/02/01 09:00'] + for v in values: + with tm.assertRaises(IndexError): + idx[v:] + + s = Series(np.random.rand(len(idx)), index=idx) + + assert_series_equal(s['2013/01/01 09:05':'2013/01/01 09:10'], s[300:660]) + assert_series_equal(s['2013/01/01 
10:00':'2013/01/01 10:05'], s[3600:3960]) + assert_series_equal(s['2013/01/01 10H':], s[3600:]) + assert_series_equal(s[:'2013/01/01 09:30'], s[:1860]) + for d in ['2013/01/01', '2013/01', '2013']: + assert_series_equal(s[d:], s) + def test_pindex_qaccess(self): pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q') s = Series(np.random.rand(len(pi)), index=pi).cumsum()
Closes #6716.
https://api.github.com/repos/pandas-dev/pandas/pulls/7043
2014-05-05T12:39:57Z
2014-05-13T14:29:35Z
2014-05-13T14:29:35Z
2014-06-17T01:47:00Z
TST: clean skipping tests in test_offsets
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index fb18db805164b..86635271eb9c1 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -97,7 +97,7 @@ def test_to_m8(): ### DateOffset Tests ##### -class TestBase(tm.TestCase): +class Base(tm.TestCase): _offset = None offset_types = [getattr(offsets, o) for o in offsets.__all__] @@ -119,8 +119,8 @@ def _get_offset(self, klass, value=1): def test_apply_out_of_range(self): if self._offset is None: - raise nose.SkipTest("_offset not defined to test out-of-range") - if self._offset in self.skip_np_u1p7: + return + if _np_version_under1p7 and self._offset in self.skip_np_u1p7: raise nose.SkipTest('numpy >= 1.7 required') # try to create an out-of-bounds result timestamp; if we can't create the offset @@ -132,11 +132,11 @@ def test_apply_out_of_range(self): self.assertIsInstance(result, datetime) except (OutOfBoundsDatetime): raise - except (ValueError, KeyError): - raise nose.SkipTest("cannot create out_of_range offset") + except (ValueError, KeyError) as e: + raise nose.SkipTest("cannot create out_of_range offset: {0} {1}".format(str(self).split('.')[-1],e)) -class TestOps(TestBase): +class TestOps(Base): def test_return_type(self): for offset in self.offset_types: @@ -157,7 +157,7 @@ def test_return_type(self): self.assert_((-offset).apply(NaT) is NaT) -class TestDateOffset(TestBase): +class TestDateOffset(Base): _multiprocess_can_split_ = True def setUp(self): @@ -197,7 +197,7 @@ def test_eq(self): self.assertNotEqual(offset1, offset2) -class TestBusinessDay(TestBase): +class TestBusinessDay(Base): _multiprocess_can_split_ = True _offset = BDay @@ -376,7 +376,7 @@ def test_offsets_compare_equal(self): self.assertFalse(offset1 != offset2) -class TestCustomBusinessDay(TestBase): +class TestCustomBusinessDay(Base): _multiprocess_can_split_ = True _offset = CDay @@ -641,7 +641,7 @@ def test_offsets_compare_equal(self): offset2 = 
self._object() self.assertFalse(offset1 != offset2) -class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, TestBase): +class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base): _object = CBMonthEnd def test_different_normalize_equals(self): @@ -756,7 +756,7 @@ def test_datetimeindex(self): self.assertEqual(DatetimeIndex(start='20120101',end='20130101',freq=CBMonthEnd(calendar=USFederalHolidayCalendar())).tolist()[0], datetime(2012,1,31)) -class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, TestBase): +class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base): _object = CBMonthBegin def test_different_normalize_equals(self): @@ -878,7 +878,7 @@ def assertOnOffset(offset, date, expected): (expected, actual, offset, date)) -class TestWeek(TestBase): +class TestWeek(Base): _offset = Week def test_repr(self): @@ -949,7 +949,7 @@ def test_offsets_compare_equal(self): self.assertFalse(offset1 != offset2) -class TestWeekOfMonth(TestBase): +class TestWeekOfMonth(Base): _offset = WeekOfMonth def test_constructor(self): @@ -1028,7 +1028,7 @@ def test_onOffset(self): offset = WeekOfMonth(week=week, weekday=weekday) self.assertEqual(offset.onOffset(date), expected) -class TestLastWeekOfMonth(TestBase): +class TestLastWeekOfMonth(Base): _offset = LastWeekOfMonth def test_constructor(self): @@ -1100,7 +1100,7 @@ def test_onOffset(self): self.assertEqual(offset.onOffset(date), expected, msg=date) -class TestBMonthBegin(TestBase): +class TestBMonthBegin(Base): _offset = BMonthBegin def test_offset(self): @@ -1162,7 +1162,7 @@ def test_offsets_compare_equal(self): self.assertFalse(offset1 != offset2) -class TestBMonthEnd(TestBase): +class TestBMonthEnd(Base): _offset = BMonthEnd def test_offset(self): @@ -1225,7 +1225,7 @@ def test_offsets_compare_equal(self): self.assertFalse(offset1 != offset2) -class TestMonthBegin(TestBase): +class TestMonthBegin(Base): _offset = MonthBegin def test_offset(self): @@ -1266,7 +1266,7 @@ def test_offset(self): 
assertEq(offset, base, expected) -class TestMonthEnd(TestBase): +class TestMonthEnd(Base): _offset = MonthEnd def test_offset(self): @@ -1334,7 +1334,7 @@ def test_onOffset(self): assertOnOffset(offset, date, expected) -class TestBQuarterBegin(TestBase): +class TestBQuarterBegin(Base): _offset = BQuarterBegin def test_repr(self): @@ -1425,7 +1425,7 @@ def test_offset(self): self.assertEqual(datetime(2007, 4, 3) + offset, datetime(2007, 4, 2)) -class TestBQuarterEnd(TestBase): +class TestBQuarterEnd(Base): _offset = BQuarterEnd def test_repr(self): @@ -1545,7 +1545,7 @@ def makeFY5253NearestEndMonth(*args, **kwds): def makeFY5253LastOfMonth(*args, **kwds): return FY5253(*args, variation="last", **kwds) -class TestFY5253LastOfMonth(TestBase): +class TestFY5253LastOfMonth(Base): def test_onOffset(self): @@ -1619,7 +1619,7 @@ def test_apply(self): current = current + offset self.assertEqual(current, datum) -class TestFY5253NearestEndMonth(TestBase): +class TestFY5253NearestEndMonth(Base): def test_get_target_month_end(self): self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT).get_target_month_end(datetime(2013,1,1)), datetime(2013,8,31)) @@ -1737,7 +1737,7 @@ def test_apply(self): current = current + offset self.assertEqual(current, datum) -class TestFY5253LastOfMonthQuarter(TestBase): +class TestFY5253LastOfMonthQuarter(Base): def test_isAnchored(self): self.assert_(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4).isAnchored()) @@ -1879,7 +1879,7 @@ def test_get_weeks(self): self.assertEqual(sat_dec_4.get_weeks(datetime(2011, 4, 2)), [13, 13, 13, 14]) self.assertEqual(sat_dec_1.get_weeks(datetime(2010, 12, 25)), [13, 13, 13, 13]) -class TestFY5253NearestEndMonthQuarter(TestBase): +class TestFY5253NearestEndMonthQuarter(Base): def test_onOffset(self): @@ -1955,7 +1955,7 @@ def test_offset(self): assertEq(offset2, datetime(2013,1,15), datetime(2013, 3, 30)) -class TestQuarterBegin(TestBase): +class 
TestQuarterBegin(Base): def test_repr(self): self.assertEqual(repr(QuarterBegin()), "<QuarterBegin: startingMonth=3>") @@ -2030,7 +2030,7 @@ def test_offset(self): self.assertEqual(datetime(2010, 2, 1) + offset, datetime(2010, 1, 1)) -class TestQuarterEnd(TestBase): +class TestQuarterEnd(Base): _offset = QuarterEnd def test_repr(self): @@ -2168,7 +2168,7 @@ def test_onOffset(self): assertOnOffset(offset, date, expected) -class TestBYearBegin(TestBase): +class TestBYearBegin(Base): _offset = BYearBegin def test_misspecified(self): @@ -2216,7 +2216,7 @@ def test_offset(self): assertEq(offset, base, expected) -class TestYearBegin(TestBase): +class TestYearBegin(Base): _offset = YearBegin def test_misspecified(self): @@ -2289,7 +2289,7 @@ def test_onOffset(self): assertOnOffset(offset, date, expected) -class TestBYearEndLagged(TestBase): +class TestBYearEndLagged(Base): def test_bad_month_fail(self): self.assertRaises(Exception, BYearEnd, month=13) @@ -2330,7 +2330,7 @@ def test_onOffset(self): assertOnOffset(offset, date, expected) -class TestBYearEnd(TestBase): +class TestBYearEnd(Base): _offset = BYearEnd def test_offset(self): @@ -2379,7 +2379,7 @@ def test_onOffset(self): assertOnOffset(offset, date, expected) -class TestYearEnd(TestBase): +class TestYearEnd(Base): _offset = YearEnd def test_misspecified(self): @@ -2431,7 +2431,7 @@ def test_onOffset(self): assertOnOffset(offset, date, expected) -class TestYearEndDiffMonth(TestBase): +class TestYearEndDiffMonth(Base): def test_offset(self): tests = []
https://api.github.com/repos/pandas-dev/pandas/pulls/7042
2014-05-05T11:50:26Z
2014-05-05T11:50:40Z
2014-05-05T11:50:40Z
2014-07-16T09:04:36Z
BUG: unstack fails in PeriodIndex
diff --git a/doc/source/release.rst b/doc/source/release.rst index 53abc22cd02f4..bf946840cdcd8 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -487,6 +487,7 @@ Bug Fixes views; mark ``is_copy`` on ``xs` only if its an actual copy (and not a view) (:issue:`7084`) - Bug in DatetimeIndex creation from string ndarray with ``dayfirst=True`` (:issue:`5917`) - Bug in ``MultiIndex.from_arrays`` created from ``DatetimeIndex`` doesn't preserve ``freq`` and ``tz`` (:issue:`7090`) +- Bug in ``unstack`` raises ``ValueError`` when ``MultiIndex`` contains ``PeriodIndex`` (:issue:`4342`) pandas 0.13.1 ------------- diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 75b95973a0f67..65eadff002eb6 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -18,6 +18,7 @@ import pandas.algos as algos from pandas.core.index import Index, MultiIndex +from pandas.tseries.period import PeriodIndex class _Unstacker(object): @@ -81,8 +82,11 @@ def __init__(self, values, index, level=-1, value_columns=None): labels = index.labels def _make_index(lev, lab): - i = lev.__class__(_make_index_array_level(lev.values, lab)) - i.name = lev.name + if isinstance(lev, PeriodIndex): + i = lev.copy() + else: + i = lev.__class__(_make_index_array_level(lev.values, lab)) + i.name = lev.name return i self.new_index_levels = [_make_index(lev, lab) diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 04e9f238d1dbe..0ea0b435a78be 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -13,6 +13,7 @@ from pandas.tseries.period import PeriodIndex from pandas.util.testing import assert_almost_equal import pandas.core.common as com +from pandas.tseries.period import PeriodIndex import pandas.util.testing as tm @@ -183,7 +184,7 @@ def test_empty_print(self): def test_periodindex(self): idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02', - '2014-03', '2014-03'], freq='M') + '2014-03', '2014-03'], 
freq='M') cat1 = Categorical.from_array(idx1) exp_arr = np.array([0, 0, 1, 1, 2, 2]) @@ -192,8 +193,9 @@ def test_periodindex(self): self.assert_numpy_array_equal(cat1.labels, exp_arr) self.assert_(cat1.levels.equals(exp_idx)) + idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01', - '2014-03', '2014-01'], freq='M') + '2014-03', '2014-01'], freq='M') cat2 = Categorical.from_array(idx2) exp_arr = np.array([2, 2, 1, 0, 2, 0]) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 00f7b65f5690e..63bace138884f 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -964,6 +964,86 @@ def test_stack_unstack_multiple(self): expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all') assert_frame_equal(unstacked, expected.ix[:, unstacked.columns]) + def test_unstack_period_series(self): + # GH 4342 + idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02', + '2013-03', '2013-03'], freq='M', name='period') + idx2 = Index(['A', 'B'] * 3, name='str') + value = [1, 2, 3, 4, 5, 6] + + idx = MultiIndex.from_arrays([idx1, idx2]) + s = Series(value, index=idx) + + result1 = s.unstack() + result2 = s.unstack(level=1) + result3 = s.unstack(level=0) + + e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period') + expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx, + columns=['A', 'B']) + expected.columns.name = 'str' + + assert_frame_equal(result1, expected) + assert_frame_equal(result2, expected) + assert_frame_equal(result3, expected.T) + + idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02', + '2013-03', '2013-03'], freq='M', name='period1') + + idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09', + '2013-08', '2013-07'], freq='M', name='period2') + idx = pd.MultiIndex.from_arrays([idx1, idx2]) + s = Series(value, index=idx) + + result1 = s.unstack() + result2 = s.unstack(level=1) + result3 = s.unstack(level=0) + + e_idx = 
pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1') + e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10', + '2013-11', '2013-12'], freq='M', name='period2') + expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1], + [np.nan, np.nan, 4, 3, np.nan, np.nan], + [6, 5, np.nan, np.nan, np.nan, np.nan]], + index=e_idx, columns=e_cols) + + assert_frame_equal(result1, expected) + assert_frame_equal(result2, expected) + assert_frame_equal(result3, expected.T) + + def test_unstack_period_frame(self): + # GH 4342 + idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'], + freq='M', name='period1') + idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'], + freq='M', name='period2') + value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]} + idx = pd.MultiIndex.from_arrays([idx1, idx2]) + df = pd.DataFrame(value, index=idx) + + result1 = df.unstack() + result2 = df.unstack(level=1) + result3 = df.unstack(level=0) + + e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1') + e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10', + '2013-12', '2014-02'], freq='M', name='period2') + e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2]) + expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]], + index=e_1, columns=e_cols) + + assert_frame_equal(result1, expected) + assert_frame_equal(result2, expected) + + e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01', + '2014-02'], freq='M', name='period1') + e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2') + e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1]) + expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]], + index=e_2, columns=e_cols) + + assert_frame_equal(result3, expected) + def test_stack_multiple_bug(self): """ bug when some uniques are not present in the data #3170""" id_col = ([1] * 3) + ([2] * 3) diff --git 
a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 43a4d4ff1239b..1a72c7925b6ee 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -2177,7 +2177,7 @@ def test_slice_keep_name(self): def test_factorize(self): idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02', - '2014-03', '2014-03'], freq='M') + '2014-03', '2014-03'], freq='M') exp_arr = np.array([0, 0, 1, 1, 2, 2]) exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
Closes #4342. Also changed `Categorical` and `PeriodIndex.factorize` to make index sorted like other indexes do.
https://api.github.com/repos/pandas-dev/pandas/pulls/7041
2014-05-05T11:45:58Z
2014-05-10T15:32:36Z
2014-05-10T15:32:36Z
2014-06-28T15:35:06Z
PERF: optimize Index.delete for dtype=object
diff --git a/doc/source/release.rst b/doc/source/release.rst index 9bfc3609f5b6d..121cfb92b0eb2 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -124,8 +124,9 @@ API Changes - ``concat`` will now concatenate mixed Series and DataFrames using the Series name or numbering columns as needed (:issue:`2385`) -- Slicing and advanced/boolean indexing operations on ``Index`` classes will no - longer change type of the resulting index (:issue:`6440`). +- Slicing and advanced/boolean indexing operations on ``Index`` classes as well + as :meth:`Index.delete` and :meth:`Index.drop` methods will no longer change type of the + resulting index (:issue:`6440`, :issue:`7040`) - ``set_index`` no longer converts MultiIndexes to an Index of tuples (:issue:`6459`). - Slicing with negative start, stop & step values handles corner cases better (:issue:`6531`): diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index c70e32fd18694..f19c1210b6a37 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -163,13 +163,15 @@ API changes - ``concat`` will now concatenate mixed Series and DataFrames using the Series name or numbering columns as needed (:issue:`2385`). See :ref:`the docs <merging.mixed_ndims>` -- Slicing and advanced/boolean indexing operations on ``Index`` classes will no - longer change type of the resulting index (:issue:`6440`) +- Slicing and advanced/boolean indexing operations on ``Index`` classes as well + as :meth:`Index.delete` and :meth:`Index.drop` methods will no longer change type of the + resulting index (:issue:`6440`, :issue:`7040`) .. ipython:: python i = pd.Index([1, 2, 3, 'a' , 'b', 'c']) i[[0,1,2]] + i.drop(['a', 'b', 'c']) Previously, the above operation would return ``Int64Index``. 
If you'd like to do this manually, use :meth:`Index.astype` diff --git a/pandas/core/index.py b/pandas/core/index.py index 96ecb66e86c67..ff6ee79bf24e4 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1760,14 +1760,13 @@ def slice_locs(self, start=None, end=None): def delete(self, loc): """ - Make new Index with passed location deleted + Make new Index with passed location(-s) deleted Returns ------- new_index : Index """ - arr = np.delete(self.values, loc) - return Index(arr) + return np.delete(self, loc) def insert(self, loc, item): """
This should close #6933. The downside of this patch is that we'll lose type inference that might change index type in rare occasions when index items have different dtypes. This is the same issue that occurred in #6440, and it was ruled out as infrequent and worth dropping for extra performance. Note though, that the benchmark results are not exactly what I've expected: ``` python In [39]: idx = tm.makeStringIndex(100000) In [40]: timeit idx.delete(-1) 1000 loops, best of 3: 1.46 ms per loop In [41]: timeit np.delete(idx, -1) 1000 loops, best of 3: 1.05 ms per loop ``` The result is faster, but not as fast as I'd expect. This is probably because of object refcounting overhead, because MultiIndex fares a lot better with its Categorial-like implementation: ``` python In [42]: midx = pd.MultiIndex.from_product([[0], idx]) # <..snip..> In [45]: timeit midx.delete(-1) 1000 loops, best of 3: 225 µs per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7040
2014-05-05T11:18:41Z
2014-05-05T13:27:58Z
2014-05-05T13:27:58Z
2014-06-16T01:51:43Z
BUG: DatetimeIndex cannot parse string ndarray with dayfirst
diff --git a/doc/source/release.rst b/doc/source/release.rst index 5eaa0aa469a3c..3e6f7bb232156 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -484,6 +484,7 @@ Bug Fixes or until the end of the line when ``colspec`` contains a ``None`` (previously raised a ``TypeError``) - Bug in cache coherence with chained indexing and slicing; add ``_is_view`` property to ``NDFrame`` to correctly predict views; mark ``is_copy`` on ``xs` only if its an actual copy (and not a view) (:issue:`7084`) +- Bug in DatetimeIndex creation from string ndarray with ``dayfirst=True`` (:issue:`5917`) pandas 0.13.1 ------------- diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index a2e01c8110261..d9018ad92eb17 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -250,20 +250,29 @@ def __new__(cls, data=None, else: subarr = data.view(_NS_DTYPE) else: - try: - subarr = tools.to_datetime(data, box=False) + if isinstance(data, ABCSeries): + values = data.values + else: + values = data - # make sure that we have a index/ndarray like (and not a Series) - if isinstance(subarr, ABCSeries): - subarr = subarr.values + if lib.is_string_array(values): + subarr = _str_to_dt_array(values, freq, dayfirst=dayfirst, + yearfirst=yearfirst) + else: + try: + subarr = tools.to_datetime(data, box=False) - except ValueError: - # tz aware - subarr = tools.to_datetime(data, box=False, utc=True) + # make sure that we have a index/ndarray like (and not a Series) + if isinstance(subarr, ABCSeries): + subarr = subarr.values + + except ValueError: + # tz aware + subarr = tools.to_datetime(data, box=False, utc=True) - if not np.issubdtype(subarr.dtype, np.datetime64): - raise ValueError('Unable to convert %s to datetime dtype' - % str(data)) + if not np.issubdtype(subarr.dtype, np.datetime64): + raise ValueError('Unable to convert %s to datetime dtype' + % str(data)) if isinstance(subarr, DatetimeIndex): if tz is None: diff --git a/pandas/tseries/tests/test_timeseries.py 
b/pandas/tseries/tests/test_timeseries.py index 319eaee6d14df..7690f118af482 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2415,6 +2415,25 @@ def test_datetimeindex_constructor(self): self.assertEquals(len(idx1), len(idx2)) self.assertEquals(idx1.offset, idx2.offset) + def test_dayfirst(self): + # GH 5917 + arr = ['10/02/2014', '11/02/2014', '12/02/2014'] + expected = DatetimeIndex([datetime(2014, 2, 10), + datetime(2014, 2, 11), + datetime(2014, 2, 12)]) + idx1 = DatetimeIndex(arr, dayfirst=True) + idx2 = DatetimeIndex(np.array(arr), dayfirst=True) + idx3 = to_datetime(arr, dayfirst=True) + idx4 = to_datetime(np.array(arr), dayfirst=True) + idx5 = DatetimeIndex(Index(arr), dayfirst=True) + idx6 = DatetimeIndex(Series(arr), dayfirst=True) + self.assert_(expected.equals(idx1)) + self.assert_(expected.equals(idx2)) + self.assert_(expected.equals(idx3)) + self.assert_(expected.equals(idx4)) + self.assert_(expected.equals(idx5)) + self.assert_(expected.equals(idx6)) + def test_dti_snap(self): dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002', '1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
Closes #5917.
https://api.github.com/repos/pandas-dev/pandas/pulls/7038
2014-05-05T04:12:11Z
2014-05-10T11:32:16Z
2014-05-10T11:32:16Z
2014-07-16T09:04:30Z
TST/CLN: centralize common validation methods in test_graphics
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index cb3f9183beb81..c158aee096429 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -7,13 +7,14 @@ import string from distutils.version import LooseVersion -from datetime import datetime, date, timedelta +from datetime import datetime, date from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip +from pandas.util.decorators import cache_readonly +import pandas.core.common as com import pandas.util.testing as tm from pandas.util.testing import ensure_clean -import pandas.core.common as com from pandas.core.config import set_option @@ -21,7 +22,7 @@ from numpy import random from numpy.random import rand, randn -from numpy.testing import assert_array_equal +from numpy.testing import assert_array_equal, assert_allclose from numpy.testing.decorators import slow import pandas.tools.plotting as plotting @@ -33,8 +34,309 @@ def _skip_if_no_scipy(): raise nose.SkipTest("no scipy") @tm.mplskip -class TestSeriesPlots(tm.TestCase): +class TestPlotBase(tm.TestCase): + + def setUp(self): + n = 100 + with tm.RNGContext(42): + gender = tm.choice(['Male', 'Female'], size=n) + classroom = tm.choice(['A', 'B', 'C'], size=n) + + self.hist_df = DataFrame({'gender': gender, + 'classroom': classroom, + 'height': random.normal(66, 4, size=n), + 'weight': random.normal(161, 32, size=n), + 'category': random.randint(4, size=n)}) + + def tearDown(self): + tm.close() + + @cache_readonly + def plt(self): + import matplotlib.pyplot as plt + return plt + + @cache_readonly + def colorconverter(self): + import matplotlib.colors as colors + return colors.colorConverter + + def _check_legend_labels(self, axes, labels=None, visible=True): + """ + Check each axes has expected legend labels + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + labels : list-like + expected legend 
labels + visible : bool + expected legend visibility. labels are checked only when visible is True + """ + + if visible and (labels is None): + raise ValueError('labels must be specified when visible is True') + axes = self._flatten_visible(axes) + for ax in axes: + if visible: + self.assertTrue(ax.get_legend() is not None) + self._check_text_labels(ax.get_legend().get_texts(), labels) + else: + self.assertTrue(ax.get_legend() is None) + + def _check_data(self, xp, rs): + """ + Check each axes has identical lines + + Parameters + ---------- + xp : matplotlib Axes object + rs : matplotlib Axes object + """ + xp_lines = xp.get_lines() + rs_lines = rs.get_lines() + + def check_line(xpl, rsl): + xpdata = xpl.get_xydata() + rsdata = rsl.get_xydata() + assert_allclose(xpdata, rsdata) + + self.assertEqual(len(xp_lines), len(rs_lines)) + [check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)] + tm.close() + + def _check_visible(self, collections, visible=True): + """ + Check each artist is visible or not + + Parameters + ---------- + collections : list-like + list or collection of target artist + visible : bool + expected visibility + """ + + for patch in collections: + self.assertEqual(patch.get_visible(), visible) + + + def _get_colors_mapped(self, series, colors): + unique = series.unique() + # unique and colors length can be differed + # depending on slice value + mapped = dict(zip(unique, colors)) + return [mapped[v] for v in series.values] + + def _check_colors(self, collections, linecolors=None, facecolors=None, + mapping=None): + """ + Check each artist has expected line colors and face colors + + Parameters + ---------- + collections : list-like + list or collection of target artist + linecolors : list-like which has the same length as collections + list of expected line colors + facecolors : list-like which has the same length as collections + list of expected face colors + mapping : Series + Series used for color grouping key + used for andrew_curves, 
parallel_coordinates, radviz test + """ + + from matplotlib.lines import Line2D + from matplotlib.collections import Collection + conv = self.colorconverter + if linecolors is not None: + + if mapping is not None: + linecolors = self._get_colors_mapped(mapping, linecolors) + linecolors = linecolors[:len(collections)] + + self.assertEqual(len(collections), len(linecolors)) + for patch, color in zip(collections, linecolors): + if isinstance(patch, Line2D): + result = patch.get_color() + # Line2D may contains string color expression + result = conv.to_rgba(result) + else: + result = patch.get_edgecolor() + + expected = conv.to_rgba(color) + self.assertEqual(result, expected) + + if facecolors is not None: + + if mapping is not None: + facecolors = self._get_colors_mapped(mapping, facecolors) + facecolors = facecolors[:len(collections)] + + self.assertEqual(len(collections), len(facecolors)) + for patch, color in zip(collections, facecolors): + if isinstance(patch, Collection): + # returned as list of np.array + result = patch.get_facecolor()[0] + else: + result = patch.get_facecolor() + + if isinstance(result, np.ndarray): + result = tuple(result) + + expected = conv.to_rgba(color) + self.assertEqual(result, expected) + + def _check_text_labels(self, texts, expected): + """ + Check each text has expected labels + + Parameters + ---------- + texts : matplotlib Text object, or its list-like + target text, or its list + expected : str or list-like which has the same length as texts + expected text label, or its list + """ + if not com.is_list_like(texts): + self.assertEqual(texts.get_text(), expected) + else: + labels = [t.get_text() for t in texts] + self.assertEqual(len(labels), len(expected)) + for l, e in zip(labels, expected): + self.assertEqual(l, e) + + def _check_ticks_props(self, axes, xlabelsize=None, xrot=None, + ylabelsize=None, yrot=None): + """ + Check each axes has expected tick properties + + Parameters + ---------- + axes : matplotlib Axes object, or its 
list-like + xlabelsize : number + expected xticks font size + xrot : number + expected xticks rotation + ylabelsize : number + expected yticks font size + yrot : number + expected yticks rotation + """ + axes = self._flatten_visible(axes) + for ax in axes: + if xlabelsize or xrot: + xtick = ax.get_xticklabels()[0] + if xlabelsize is not None: + self.assertAlmostEqual(xtick.get_fontsize(), xlabelsize) + if xrot is not None: + self.assertAlmostEqual(xtick.get_rotation(), xrot) + + if ylabelsize or yrot: + ytick = ax.get_yticklabels()[0] + if ylabelsize is not None: + self.assertAlmostEqual(ytick.get_fontsize(), ylabelsize) + if yrot is not None: + self.assertAlmostEqual(ytick.get_rotation(), yrot) + + def _check_ax_scales(self, axes, xaxis='linear', yaxis='linear'): + """ + Check each axes has expected scales + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + xaxis : {'linear', 'log'} + expected xaxis scale + yaxis : {'linear', 'log'} + expected yaxis scale + """ + axes = self._flatten_visible(axes) + for ax in axes: + self.assertEqual(ax.xaxis.get_scale(), xaxis) + self.assertEqual(ax.yaxis.get_scale(), yaxis) + + def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=(8.0, 6.0)): + """ + Check expected number of axes is drawn in expected layout + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + axes_num : number + expected number of axes. Unnecessary axes should be set to invisible. + layout : tuple + expected layout + figsize : tuple + expected figsize. 
default is matplotlib default + """ + visible_axes = self._flatten_visible(axes) + + if axes_num is not None: + self.assertEqual(len(visible_axes), axes_num) + for ax in visible_axes: + # check something drawn on visible axes + self.assert_(len(ax.get_children()) > 0) + + if layout is not None: + if isinstance(axes, list): + self.assertEqual((len(axes), ), layout) + elif isinstance(axes, np.ndarray): + self.assertEqual(axes.shape, layout) + else: + # in case of AxesSubplot + self.assertEqual((1, ), layout) + + self.assert_numpy_array_equal(np.round(visible_axes[0].figure.get_size_inches()), + np.array(figsize)) + + def _flatten_visible(self, axes): + """ + Flatten axes, and filter only visible + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + + """ + axes = plotting._flatten(axes) + axes = [ax for ax in axes if ax.get_visible()] + return axes + + def _check_has_errorbars(self, axes, xerr=0, yerr=0): + """ + Check axes has expected number of errorbars + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + xerr : number + expected number of x errorbar + yerr : number + expected number of y errorbar + """ + + axes = self._flatten_visible(axes) + for ax in axes: + containers = ax.containers + xerr_count = 0 + yerr_count = 0 + for c in containers: + has_xerr = getattr(c, 'has_xerr', False) + has_yerr = getattr(c, 'has_yerr', False) + if has_xerr: + xerr_count += 1 + if has_yerr: + yerr_count += 1 + self.assertEqual(xerr, xerr_count) + self.assertEqual(yerr, yerr_count) + + +@tm.mplskip +class TestSeriesPlots(TestPlotBase): + def setUp(self): + TestPlotBase.setUp(self) import matplotlib as mpl self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1') self.ts = tm.makeTimeSeries() @@ -46,17 +348,22 @@ def setUp(self): self.iseries = tm.makePeriodSeries() self.iseries.name = 'iseries' - def tearDown(self): - tm.close() - @slow def test_plot(self): _check_plot_works(self.ts.plot, label='foo') 
_check_plot_works(self.ts.plot, use_index=False) - _check_plot_works(self.ts.plot, rot=0) - _check_plot_works(self.ts.plot, style='.', logy=True) - _check_plot_works(self.ts.plot, style='.', logx=True) - _check_plot_works(self.ts.plot, style='.', loglog=True) + axes = _check_plot_works(self.ts.plot, rot=0) + self._check_ticks_props(axes, xrot=0) + + ax = _check_plot_works(self.ts.plot, style='.', logy=True) + self._check_ax_scales(ax, yaxis='log') + + ax = _check_plot_works(self.ts.plot, style='.', logx=True) + self._check_ax_scales(ax, xaxis='log') + + ax = _check_plot_works(self.ts.plot, style='.', loglog=True) + self._check_ax_scales(ax, xaxis='log', yaxis='log') + _check_plot_works(self.ts[:10].plot, kind='bar') _check_plot_works(self.ts.plot, kind='area', stacked=False) _check_plot_works(self.iseries.plot) @@ -65,20 +372,19 @@ def test_plot(self): _check_plot_works(self.series[:5].plot, kind=kind) _check_plot_works(self.series[:10].plot, kind='barh') - _check_plot_works(Series(randn(10)).plot, kind='bar', color='black') + ax = _check_plot_works(Series(randn(10)).plot, kind='bar', color='black') + self._check_colors([ax.patches[0]], facecolors=['black']) # GH 6951 - _check_plot_works(self.ts.plot, subplots=True) + ax = _check_plot_works(self.ts.plot, subplots=True) + self._check_axes_shape(ax, axes_num=1, layout=(1, )) @slow def test_plot_figsize_and_title(self): # figsize and title - import matplotlib.pyplot as plt ax = self.series.plot(title='Test', figsize=(16, 8)) - - self.assertEqual(ax.title.get_text(), 'Test') - assert_array_equal(np.round(ax.figure.get_size_inches()), - np.array((16., 8.))) + self._check_text_labels(ax.title, 'Test') + self._check_axes_shape(ax, axes_num=1, layout=(1, ), figsize=(16, 8)) def test_ts_area_lim(self): ax = self.ts.plot(kind='area', stacked=False) @@ -121,15 +427,12 @@ def test_bar_log(self): def test_bar_ignore_index(self): df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) ax = df.plot(kind='bar', use_index=False) - 
expected = ['0', '1', '2', '3'] - result = [x.get_text() for x in ax.get_xticklabels()] - self.assertEqual(result, expected) + self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3']) def test_rotation(self): df = DataFrame(randn(5, 5)) - ax = df.plot(rot=30) - for l in ax.get_xticklabels(): - self.assertEqual(l.get_rotation(), 30) + axes = df.plot(rot=30) + self._check_ticks_props(axes, xrot=30) def test_irregular_datetime(self): rng = date_range('1/1/2000', '3/1/2000') @@ -146,34 +449,26 @@ def test_pie_series(self): series = Series(np.random.randint(1, 5), index=['a', 'b', 'c', 'd', 'e'], name='YLABEL') ax = _check_plot_works(series.plot, kind='pie') - for t, expected in zip(ax.texts, series.index): - self.assertEqual(t.get_text(), expected) + self._check_text_labels(ax.texts, series.index) self.assertEqual(ax.get_ylabel(), 'YLABEL') # without wedge labels ax = _check_plot_works(series.plot, kind='pie', labels=None) - for t, expected in zip(ax.texts, [''] * 5): - self.assertEqual(t.get_text(), expected) + self._check_text_labels(ax.texts, [''] * 5) # with less colors than elements color_args = ['r', 'g', 'b'] ax = _check_plot_works(series.plot, kind='pie', colors=color_args) - import matplotlib.colors as colors - conv = colors.colorConverter color_expected = ['r', 'g', 'b', 'r', 'g'] - for p, expected in zip(ax.patches, color_expected): - self.assertEqual(p.get_facecolor(), conv.to_rgba(expected)) + self._check_colors(ax.patches, facecolors=color_expected) # with labels and colors labels = ['A', 'B', 'C', 'D', 'E'] color_args = ['r', 'g', 'b', 'c', 'm'] ax = _check_plot_works(series.plot, kind='pie', labels=labels, colors=color_args) - - for t, expected in zip(ax.texts, labels): - self.assertEqual(t.get_text(), expected) - for p, expected in zip(ax.patches, color_args): - self.assertEqual(p.get_facecolor(), conv.to_rgba(expected)) + self._check_text_labels(ax.texts, labels) + self._check_colors(ax.patches, facecolors=color_args) # with autopct and 
fontsize ax = _check_plot_works(series.plot, kind='pie', colors=color_args, @@ -181,8 +476,8 @@ def test_pie_series(self): pcts = ['{0:.2f}'.format(s * 100) for s in series.values / float(series.sum())] iters = [iter(series.index), iter(pcts)] expected_texts = list(it.next() for it in itertools.cycle(iters)) - for t, expected in zip(ax.texts, expected_texts): - self.assertEqual(t.get_text(), expected) + self._check_text_labels(ax.texts, expected_texts) + for t in ax.texts: self.assertEqual(t.get_fontsize(), 7) # includes negative value @@ -194,8 +489,7 @@ def test_pie_series(self): series = Series([1, 2, np.nan, 4], index=['a', 'b', 'c', 'd'], name='YLABEL') ax = _check_plot_works(series.plot, kind='pie') - for t, expected in zip(ax.texts, series.index): - self.assertEqual(t.get_text(), expected) + self._check_text_labels(ax.texts, series.index) @slow def test_hist(self): @@ -205,14 +499,13 @@ def test_hist(self): _check_plot_works(self.ts.hist, by=self.ts.index.month) _check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5) - import matplotlib.pyplot as plt - fig, ax = plt.subplots(1, 1) + fig, ax = self.plt.subplots(1, 1) _check_plot_works(self.ts.hist, ax=ax) _check_plot_works(self.ts.hist, ax=ax, figure=fig) _check_plot_works(self.ts.hist, figure=fig) tm.close() - fig, (ax1, ax2) = plt.subplots(1, 2) + fig, (ax1, ax2) = self.plt.subplots(1, 2) _check_plot_works(self.ts.hist, figure=fig, ax=ax1) _check_plot_works(self.ts.hist, figure=fig, ax=ax2) @@ -227,11 +520,7 @@ def test_hist_bins(self): @slow def test_hist_layout(self): - n = 10 - gender = tm.choice(['Male', 'Female'], size=n) - df = DataFrame({'gender': gender, - 'height': random.normal(66, 4, size=n), 'weight': - random.normal(161, 32, size=n)}) + df = self.hist_df with tm.assertRaises(ValueError): df.height.hist(layout=(1, 1)) @@ -240,28 +529,23 @@ def test_hist_layout(self): @slow def test_hist_layout_with_by(self): - import matplotlib.pyplot as plt - n = 10 - gender = tm.choice(['Male', 
'Female'], size=n) - df = DataFrame({'gender': gender, - 'height': random.normal(66, 4, size=n), 'weight': - random.normal(161, 32, size=n), - 'category': random.randint(4, size=n)}) - _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1)) - tm.close() + df = self.hist_df - _check_plot_works(df.height.hist, by=df.gender, layout=(1, 2)) - tm.close() + axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1)) + self._check_axes_shape(axes, axes_num=2, layout=(2, ), figsize=(10, 5)) - _check_plot_works(df.weight.hist, by=df.category, layout=(1, 4)) - tm.close() + axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1)) + self._check_axes_shape(axes, axes_num=4, layout=(4, ), figsize=(10, 5)) - _check_plot_works(df.weight.hist, by=df.category, layout=(4, 1)) - tm.close() + axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2)) + self._check_axes_shape(axes, axes_num=3, layout=(2, 2), figsize=(10, 5)) + + axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 2)) + self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(10, 5)) @slow def test_hist_no_overlap(self): - from matplotlib.pyplot import subplot, gcf, close + from matplotlib.pyplot import subplot, gcf x = Series(randn(2)) y = Series(randn(2)) subplot(121) @@ -280,15 +564,12 @@ def test_plot_fails_with_dupe_color_and_style(self): @slow def test_hist_by_no_extra_plots(self): - import matplotlib.pyplot as plt - n = 10 - df = DataFrame({'gender': ['Male'] * 5 + ['Female'] * 5, - 'height': random.normal(66, 4, size=n)}) + df = self.hist_df axes = df.height.hist(by=df.gender) - self.assertEqual(len(plt.get_fignums()), 1) + self.assertEqual(len(self.plt.get_fignums()), 1) def test_plot_fails_when_ax_differs_from_figure(self): - from pylab import figure, close + from pylab import figure fig1 = figure() fig2 = figure() ax1 = fig1.add_subplot(111) @@ -301,7 +582,7 @@ def test_kde(self): _check_plot_works(self.ts.plot, kind='kde') 
_check_plot_works(self.ts.plot, kind='density') ax = self.ts.plot(kind='kde', logy=True) - self.assertEqual(ax.get_yscale(), 'log') + self._check_ax_scales(ax, yaxis='log') @slow def test_kde_kwargs(self): @@ -310,15 +591,16 @@ def test_kde_kwargs(self): _check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20)) _check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20)) ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20)) - self.assertEqual(ax.get_yscale(), 'log') + self._check_ax_scales(ax, yaxis='log') @slow def test_kde_color(self): _skip_if_no_scipy() ax = self.ts.plot(kind='kde', logy=True, color='r') + self._check_ax_scales(ax, yaxis='log') lines = ax.get_lines() self.assertEqual(len(lines), 1) - self.assertEqual(lines[0].get_color(), 'r') + self._check_colors(lines, ['r']) @slow def test_autocorrelation_plot(self): @@ -327,8 +609,7 @@ def test_autocorrelation_plot(self): _check_plot_works(autocorrelation_plot, self.ts.values) ax = autocorrelation_plot(self.ts, label='Test') - t = ax.get_legend().get_texts()[0].get_text() - self.assertEqual(t, 'Test') + self._check_legend_labels(ax, labels=['Test']) @slow def test_lag_plot(self): @@ -383,18 +664,18 @@ def test_errorbar_plot(self): kinds = ['line', 'bar'] for kind in kinds: ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, yerr=s_err, kind=kind) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, yerr=d_err, kind=kind) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, xerr=0.2, 
yerr=0.2, kind=kind) - _check_has_errorbars(self, ax, xerr=1, yerr=1) + self._check_has_errorbars(ax, xerr=1, yerr=1) ax = _check_plot_works(s.plot, xerr=s_err) - _check_has_errorbars(self, ax, xerr=1, yerr=0) + self._check_has_errorbars(ax, xerr=1, yerr=0) # test time series plotting ix = date_range('1/1/2000', '1/1/2001', freq='M') @@ -403,9 +684,9 @@ def test_errorbar_plot(self): td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y']) ax = _check_plot_works(ts.plot, yerr=ts_err) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(ts.plot, yerr=td_err) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) # check incorrect lengths and types with tm.assertRaises(ValueError): @@ -421,23 +702,33 @@ def test_table(self): @tm.mplskip -class TestDataFramePlots(tm.TestCase): +class TestDataFramePlots(TestPlotBase): def setUp(self): + TestPlotBase.setUp(self) import matplotlib as mpl self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1') - def tearDown(self): - tm.close() + self.tdf = tm.makeTimeDataFrame() + self.hexbin_df = DataFrame({"A": np.random.uniform(size=20), + "B": np.random.uniform(size=20), + "C": np.arange(20) + np.random.uniform(size=20)}) + + from pandas import read_csv + path = os.path.join(curpath(), 'data', 'iris.csv') + self.iris = read_csv(path) @slow def test_plot(self): - df = tm.makeTimeDataFrame() + df = self.tdf _check_plot_works(df.plot, grid=False) - _check_plot_works(df.plot, subplots=True) + axes = _check_plot_works(df.plot, subplots=True) + self._check_axes_shape(axes, axes_num=4, layout=(4, )) _check_plot_works(df.plot, subplots=True, use_index=False) + self._check_axes_shape(axes, axes_num=4, layout=(4, )) df = DataFrame({'x': [1, 2], 'y': [3, 4]}) - self._check_plot_fails(df.plot, kind='line', blarg=True) + with tm.assertRaises(TypeError): + df.plot(kind='line', blarg=True) df = DataFrame(np.random.rand(10, 3), 
index=list(string.ascii_letters[:10])) @@ -446,7 +737,10 @@ def test_plot(self): _check_plot_works(df.plot, yticks=[1, 5, 10]) _check_plot_works(df.plot, xticks=[1, 5, 10]) _check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100)) - _check_plot_works(df.plot, subplots=True, title='blah') + + axes = _check_plot_works(df.plot, subplots=True, title='blah') + self._check_axes_shape(axes, axes_num=3, layout=(3, )) + _check_plot_works(df.plot, title='blah') tuples = lzip(string.ascii_letters[:10], range(10)) @@ -474,10 +768,10 @@ def test_plot(self): # GH 6951 # Test with single column df = DataFrame({'x': np.random.rand(10)}) - _check_plot_works(df.plot, kind='bar', subplots=True) + axes = _check_plot_works(df.plot, kind='bar', subplots=True) + self._check_axes_shape(axes, axes_num=1, layout=(1, )) def test_nonnumeric_exclude(self): - import matplotlib.pyplot as plt df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]}) ax = df.plot() self.assertEqual(len(ax.get_lines()), 1) # B was plotted @@ -486,19 +780,18 @@ def test_nonnumeric_exclude(self): def test_implicit_label(self): df = DataFrame(randn(10, 3), columns=['a', 'b', 'c']) ax = df.plot(x='a', y='b') - self.assertEqual(ax.xaxis.get_label().get_text(), 'a') + self._check_text_labels(ax.xaxis.get_label(), 'a') @slow def test_explicit_label(self): df = DataFrame(randn(10, 3), columns=['a', 'b', 'c']) ax = df.plot(x='a', y='b', label='LABEL') - self.assertEqual(ax.xaxis.get_label().get_text(), 'LABEL') + self._check_text_labels(ax.xaxis.get_label(), 'LABEL') @slow def test_plot_xy(self): - import matplotlib.pyplot as plt # columns.inferred_type == 'string' - df = tm.makeTimeDataFrame() + df = self.tdf self._check_data(df.plot(x=0, y=1), df.set_index('A')['B'].plot()) self._check_data(df.plot(x=0), df.set_index('A').plot()) @@ -517,10 +810,8 @@ def test_plot_xy(self): # figsize and title ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8)) - - self.assertEqual(ax.title.get_text(), 'Test') - 
assert_array_equal(np.round(ax.figure.get_size_inches()), - np.array((16., 8.))) + self._check_text_labels(ax.title, 'Test') + self._check_axes_shape(ax, axes_num=1, layout=(1, ), figsize=(16., 8.)) # columns.inferred_type == 'mixed' # TODO add MultiIndex test @@ -530,23 +821,19 @@ def test_logscales(self): df = DataFrame({'a': np.arange(100)}, index=np.arange(100)) ax = df.plot(logy=True) - self.assertEqual(ax.xaxis.get_scale(), 'linear') - self.assertEqual(ax.yaxis.get_scale(), 'log') + self._check_ax_scales(ax, yaxis='log') ax = df.plot(logx=True) - self.assertEqual(ax.xaxis.get_scale(), 'log') - self.assertEqual(ax.yaxis.get_scale(), 'linear') + self._check_ax_scales(ax, xaxis='log') ax = df.plot(loglog=True) - self.assertEqual(ax.xaxis.get_scale(), 'log') - self.assertEqual(ax.yaxis.get_scale(), 'log') + self._check_ax_scales(ax, xaxis='log', yaxis='log') @slow def test_xcompat(self): import pandas as pd - import matplotlib.pyplot as plt - df = tm.makeTimeDataFrame() + df = self.tdf ax = df.plot(x_compat=True) lines = ax.get_lines() self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex) @@ -584,18 +871,6 @@ def test_unsorted_index(self): rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64) tm.assert_series_equal(rs, df.y) - def _check_data(self, xp, rs): - xp_lines = xp.get_lines() - rs_lines = rs.get_lines() - - def check_line(xpl, rsl): - xpdata = xpl.get_xydata() - rsdata = rsl.get_xydata() - assert_array_equal(xpdata, rsdata) - - [check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)] - tm.close() - @slow def test_subplots(self): df = DataFrame(np.random.rand(10, 3), @@ -603,28 +878,22 @@ def test_subplots(self): for kind in ['bar', 'barh', 'line', 'area']: axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True) + self._check_axes_shape(axes, axes_num=3, layout=(3, )) for ax, column in zip(axes, df.columns): - self._check_legend_labels(ax, [column]) + self._check_legend_labels(ax, labels=[com.pprint_thing(column)]) - axes = 
df.plot(kind=kind, subplots=True, sharex=True) for ax in axes[:-2]: - [self.assert_(not label.get_visible()) - for label in ax.get_xticklabels()] - [self.assert_(label.get_visible()) - for label in ax.get_yticklabels()] + self._check_visible(ax.get_xticklabels(), visible=False) + self._check_visible(ax.get_yticklabels()) - [self.assert_(label.get_visible()) - for label in axes[-1].get_xticklabels()] - [self.assert_(label.get_visible()) - for label in axes[-1].get_yticklabels()] + self._check_visible(axes[-1].get_xticklabels()) + self._check_visible(axes[-1].get_yticklabels()) axes = df.plot(kind=kind, subplots=True, sharex=False) for ax in axes: - [self.assert_(label.get_visible()) - for label in ax.get_xticklabels()] - [self.assert_(label.get_visible()) - for label in ax.get_yticklabels()] + self._check_visible(ax.get_xticklabels()) + self._check_visible(ax.get_yticklabels()) axes = df.plot(kind=kind, subplots=True, legend=False) for ax in axes: @@ -731,63 +1000,35 @@ def test_area_lim(self): @slow def test_bar_colors(self): import matplotlib.pyplot as plt - import matplotlib.colors as colors default_colors = plt.rcParams.get('axes.color_cycle') - custom_colors = 'rgcby' + df = DataFrame(randn(5, 5)) ax = df.plot(kind='bar') - - rects = ax.patches - - conv = colors.colorConverter - for i, rect in enumerate(rects[::5]): - xp = conv.to_rgba(default_colors[i % len(default_colors)]) - rs = rect.get_facecolor() - self.assertEqual(xp, rs) - + self._check_colors(ax.patches[::5], facecolors=default_colors[:5]) tm.close() + custom_colors = 'rgcby' ax = df.plot(kind='bar', color=custom_colors) - - rects = ax.patches - - conv = colors.colorConverter - for i, rect in enumerate(rects[::5]): - xp = conv.to_rgba(custom_colors[i]) - rs = rect.get_facecolor() - self.assertEqual(xp, rs) - + self._check_colors(ax.patches[::5], facecolors=custom_colors) tm.close() - from matplotlib import cm + from matplotlib import cm # Test str -> colormap functionality ax = df.plot(kind='bar', 
colormap='jet') - - rects = ax.patches - rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5)) - for i, rect in enumerate(rects[::5]): - xp = rgba_colors[i] - rs = rect.get_facecolor() - self.assertEqual(xp, rs) - + self._check_colors(ax.patches[::5], facecolors=rgba_colors) tm.close() # Test colormap functionality ax = df.plot(kind='bar', colormap=cm.jet) - - rects = ax.patches - rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5)) - for i, rect in enumerate(rects[::5]): - xp = rgba_colors[i] - rs = rect.get_facecolor() - self.assertEqual(xp, rs) - + self._check_colors(ax.patches[::5], facecolors=rgba_colors) tm.close() - df.ix[:, [0]].plot(kind='bar', color='DodgerBlue') + + ax = df.ix[:, [0]].plot(kind='bar', color='DodgerBlue') + self._check_colors([ax.patches[0]], facecolors=['DodgerBlue']) @slow def test_bar_linewidth(self): @@ -805,6 +1046,7 @@ def test_bar_linewidth(self): # subplots axes = df.plot(kind='bar', linewidth=2, subplots=True) + self._check_axes_shape(axes, axes_num=5, layout=(5, )) for ax in axes: for r in ax.patches: self.assertEqual(r.get_linewidth(), 2) @@ -859,7 +1101,6 @@ def test_bar_barwidth_position(self): @slow def test_plot_scatter(self): - from matplotlib.pylab import close df = DataFrame(randn(6, 4), index=list(string.ascii_letters[:6]), columns=['x', 'y', 'z', 'four']) @@ -874,11 +1115,10 @@ def test_plot_scatter(self): # GH 6951 axes = df.plot(x='x', y='y', kind='scatter', subplots=True) - self.assertEqual(len(axes[0].figure.axes), 1) + self._check_axes_shape(axes, axes_num=1, layout=(1, )) @slow def test_plot_bar(self): - from matplotlib.pylab import close df = DataFrame(randn(6, 4), index=list(string.ascii_letters[:6]), columns=['one', 'two', 'three', 'four']) @@ -906,8 +1146,7 @@ def _check_bar_alignment(self, df, kind='bar', stacked=False, tick_pos = np.arange(len(df)) - if not isinstance(axes, np.ndarray): - axes = [axes] + axes = self._flatten_visible(axes) for ax in axes: if kind == 'bar': @@ -1085,20 +1324,19 @@ def 
test_boxplot(self): _check_plot_works(df.boxplot, by='X') # When ax is supplied, existing axes should be used: - import matplotlib.pyplot as plt - fig, ax = plt.subplots() + fig, ax = self.plt.subplots() axes = df.boxplot('Col1', by='X', ax=ax) self.assertIs(ax.get_axes(), axes) # Multiple columns with an ax argument is not supported - fig, ax = plt.subplots() + fig, ax = self.plt.subplots() self.assertRaisesRegexp( ValueError, 'existing axis', df.boxplot, column=['Col1', 'Col2'], by='X', ax=ax ) # When by is None, check that all relevant lines are present in the dict - fig, ax = plt.subplots() + fig, ax = self.plt.subplots() d = df.boxplot(ax=ax) lines = list(itertools.chain.from_iterable(d.values())) self.assertEqual(len(ax.get_lines()), len(lines)) @@ -1107,17 +1345,18 @@ def test_boxplot(self): def test_kde(self): _skip_if_no_scipy() df = DataFrame(randn(100, 4)) - _check_plot_works(df.plot, kind='kde') - _check_plot_works(df.plot, kind='kde', subplots=True) - ax = df.plot(kind='kde') - self._check_legend_labels(ax, df.columns) + ax = _check_plot_works(df.plot, kind='kde') + expected = [com.pprint_thing(c) for c in df.columns] + self._check_legend_labels(ax, labels=expected) + + axes = _check_plot_works(df.plot, kind='kde', subplots=True) + self._check_axes_shape(axes, axes_num=4, layout=(4, )) + axes = df.plot(kind='kde', logy=True, subplots=True) - for ax in axes: - self.assertEqual(ax.get_yscale(), 'log') + self._check_ax_scales(axes, yaxis='log') @slow def test_hist(self): - import matplotlib.pyplot as plt df = DataFrame(randn(100, 4)) _check_plot_works(df.hist) _check_plot_works(df.hist, grid=False) @@ -1146,27 +1385,17 @@ def test_hist(self): # make sure xlabelsize and xrot are handled ser = df[0] - xf, yf = 20, 20 - xrot, yrot = 30, 30 - ax = ser.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30) - ytick = ax.get_yticklabels()[0] - xtick = ax.get_xticklabels()[0] - self.assertAlmostEqual(ytick.get_fontsize(), yf) - 
self.assertAlmostEqual(ytick.get_rotation(), yrot) - self.assertAlmostEqual(xtick.get_fontsize(), xf) - self.assertAlmostEqual(xtick.get_rotation(), xrot) - - xf, yf = 20, 20 - xrot, yrot = 30, 30 - axes = df.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30) - for i, ax in enumerate(axes.ravel()): - if i < len(df.columns): - ytick = ax.get_yticklabels()[0] - xtick = ax.get_xticklabels()[0] - self.assertAlmostEqual(ytick.get_fontsize(), yf) - self.assertAlmostEqual(ytick.get_rotation(), yrot) - self.assertAlmostEqual(xtick.get_fontsize(), xf) - self.assertAlmostEqual(xtick.get_rotation(), xrot) + xf, yf = 20, 18 + xrot, yrot = 30, 40 + axes = ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) + self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot, + ylabelsize=yf, yrot=yrot) + + xf, yf = 20, 18 + xrot, yrot = 30, 40 + axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) + self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot, + ylabelsize=yf, yrot=yrot) tm.close() # make sure kwargs to hist are handled @@ -1177,7 +1406,7 @@ def test_hist(self): tm.close() ax = ser.hist(log=True) # scale of y must be 'log' - self.assertEqual(ax.get_yscale(), 'log') + self._check_ax_scales(ax, yaxis='log') tm.close() @@ -1187,8 +1416,7 @@ def test_hist(self): @slow def test_hist_layout(self): - import matplotlib.pyplot as plt - df = DataFrame(randn(100, 4)) + df = DataFrame(randn(100, 3)) layout_to_expected_size = ( {'layout': None, 'expected_size': (2, 2)}, # default is 2x2 @@ -1199,9 +1427,9 @@ def test_hist_layout(self): ) for layout_test in layout_to_expected_size: - ax = df.hist(layout=layout_test['layout']) - self.assertEqual(len(ax), layout_test['expected_size'][0]) - self.assertEqual(len(ax[0]), layout_test['expected_size'][1]) + axes = df.hist(layout=layout_test['layout']) + expected = layout_test['expected_size'] + self._check_axes_shape(axes, axes_num=3, layout=expected) # layout too small for all 4 plots with tm.assertRaises(ValueError): @@ 
-1238,19 +1466,24 @@ def scat2(x, y, by=None, ax=None, figsize=None): @slow def test_andrews_curves(self): - from pandas import read_csv from pandas.tools.plotting import andrews_curves from matplotlib import cm - path = os.path.join(curpath(), 'data', 'iris.csv') - df = read_csv(path) + df = self.iris _check_plot_works(andrews_curves, df, 'Name') - _check_plot_works(andrews_curves, df, 'Name', - color=('#556270', '#4ECDC4', '#C7F464')) - _check_plot_works(andrews_curves, df, 'Name', - color=['dodgerblue', 'aquamarine', 'seagreen']) - _check_plot_works(andrews_curves, df, 'Name', colormap=cm.jet) + + rgba = ('#556270', '#4ECDC4', '#C7F464') + ax = _check_plot_works(andrews_curves, df, 'Name', color=rgba) + self._check_colors(ax.get_lines()[:10], linecolors=rgba, mapping=df['Name'][:10]) + + cnames = ['dodgerblue', 'aquamarine', 'seagreen'] + ax = _check_plot_works(andrews_curves, df, 'Name', color=cnames) + self._check_colors(ax.get_lines()[:10], linecolors=cnames, mapping=df['Name'][:10]) + + ax = _check_plot_works(andrews_curves, df, 'Name', colormap=cm.jet) + cmaps = lmap(cm.jet, np.linspace(0, 1, df['Name'].nunique())) + self._check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df['Name'][:10]) colors = ['b', 'g', 'r'] df = DataFrame({"A": [1, 2, 3], @@ -1258,61 +1491,70 @@ def test_andrews_curves(self): "C": [1, 2, 3], "Name": colors}) ax = andrews_curves(df, 'Name', color=colors) - legend_colors = [l.get_color() for l in ax.legend().get_lines()] - self.assertEqual(colors, legend_colors) + handles, labels = ax.get_legend_handles_labels() + self._check_colors(handles, linecolors=colors) with tm.assert_produces_warning(FutureWarning): andrews_curves(data=df, class_column='Name') @slow def test_parallel_coordinates(self): - from pandas import read_csv from pandas.tools.plotting import parallel_coordinates from matplotlib import cm - path = os.path.join(curpath(), 'data', 'iris.csv') - df = read_csv(path) - _check_plot_works(parallel_coordinates, df, 
'Name') - _check_plot_works(parallel_coordinates, df, 'Name', - color=('#556270', '#4ECDC4', '#C7F464')) - _check_plot_works(parallel_coordinates, df, 'Name', - color=['dodgerblue', 'aquamarine', 'seagreen']) - _check_plot_works(parallel_coordinates, df, 'Name', colormap=cm.jet) - - df = read_csv(path, header=None, skiprows=1, names=[1, 2, 4, 8, - 'Name']) - _check_plot_works(parallel_coordinates, df, 'Name', use_columns=True) - _check_plot_works(parallel_coordinates, df, 'Name', - xticks=[1, 5, 25, 125]) + df = self.iris + + _check_plot_works(parallel_coordinates, df, 'Name') + + rgba = ('#556270', '#4ECDC4', '#C7F464') + ax = _check_plot_works(parallel_coordinates, df, 'Name', color=rgba) + self._check_colors(ax.get_lines()[:10], linecolors=rgba, mapping=df['Name'][:10]) + + cnames = ['dodgerblue', 'aquamarine', 'seagreen'] + ax = _check_plot_works(parallel_coordinates, df, 'Name', color=cnames) + self._check_colors(ax.get_lines()[:10], linecolors=cnames, mapping=df['Name'][:10]) + + ax = _check_plot_works(parallel_coordinates, df, 'Name', colormap=cm.jet) + cmaps = lmap(cm.jet, np.linspace(0, 1, df['Name'].nunique())) + self._check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df['Name'][:10]) + colors = ['b', 'g', 'r'] df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors}) ax = parallel_coordinates(df, 'Name', color=colors) - legend_colors = [l.get_color() for l in ax.legend().get_lines()] - self.assertEqual(colors, legend_colors) - - with tm.assert_produces_warning(FutureWarning): - parallel_coordinates(df, 'Name', colors=colors) + handles, labels = ax.get_legend_handles_labels() + self._check_colors(handles, linecolors=colors) with tm.assert_produces_warning(FutureWarning): parallel_coordinates(data=df, class_column='Name') + with tm.assert_produces_warning(FutureWarning): + parallel_coordinates(df, 'Name', colors=colors) @slow def test_radviz(self): - from pandas import read_csv from pandas.tools.plotting import radviz from 
matplotlib import cm - path = os.path.join(curpath(), 'data', 'iris.csv') - df = read_csv(path) + df = self.iris _check_plot_works(radviz, df, 'Name') - _check_plot_works(radviz, df, 'Name', - color=('#556270', '#4ECDC4', '#C7F464')) - _check_plot_works(radviz, df, 'Name', - color=['dodgerblue', 'aquamarine', 'seagreen']) + + rgba = ('#556270', '#4ECDC4', '#C7F464') + ax = _check_plot_works(radviz, df, 'Name', color=rgba) + # skip Circle drawn as ticks + patches = [p for p in ax.patches[:20] if p.get_label() != ''] + self._check_colors(patches[:10], facecolors=rgba, mapping=df['Name'][:10]) + + cnames = ['dodgerblue', 'aquamarine', 'seagreen'] + _check_plot_works(radviz, df, 'Name', color=cnames) + patches = [p for p in ax.patches[:20] if p.get_label() != ''] + self._check_colors(patches, facecolors=cnames, mapping=df['Name'][:10]) + _check_plot_works(radviz, df, 'Name', colormap=cm.jet) + cmaps = lmap(cm.jet, np.linspace(0, 1, df['Name'].nunique())) + patches = [p for p in ax.patches[:20] if p.get_label() != ''] + self._check_colors(patches, facecolors=cmaps, mapping=df['Name'][:10]) colors = [[0., 0., 1., 1.], [0., 0.5, 1., 1.], @@ -1322,25 +1564,17 @@ def test_radviz(self): "C": [3, 2, 1], "Name": ['b', 'g', 'r']}) ax = radviz(df, 'Name', color=colors) - legend_colors = [c.get_facecolor().squeeze().tolist() - for c in ax.collections] - self.assertEqual(colors, legend_colors) + handles, labels = ax.get_legend_handles_labels() + self._check_colors(handles, facecolors=colors) @slow def test_plot_int_columns(self): df = DataFrame(randn(100, 4)).cumsum() _check_plot_works(df.plot, legend=True) - def _check_legend_labels(self, ax, labels): - import pandas.core.common as com - labels = [com.pprint_thing(l) for l in labels] - self.assertTrue(ax.get_legend() is not None) - legend_labels = [t.get_text() for t in ax.get_legend().get_texts()] - self.assertEqual(labels, legend_labels) - @slow def test_df_legend_labels(self): - kinds = 'line', 'bar', 'barh', 'kde', 'density', 
'area' + kinds = 'line', 'bar', 'barh', 'kde', 'area' df = DataFrame(rand(3, 3), columns=['a', 'b', 'c']) df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f']) df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i']) @@ -1348,25 +1582,25 @@ def test_df_legend_labels(self): for kind in kinds: ax = df.plot(kind=kind, legend=True) - self._check_legend_labels(ax, df.columns) + self._check_legend_labels(ax, labels=df.columns) ax = df2.plot(kind=kind, legend=False, ax=ax) - self._check_legend_labels(ax, df.columns) + self._check_legend_labels(ax, labels=df.columns) ax = df3.plot(kind=kind, legend=True, ax=ax) - self._check_legend_labels(ax, df.columns + df3.columns) + self._check_legend_labels(ax, labels=df.columns + df3.columns) ax = df4.plot(kind=kind, legend='reverse', ax=ax) expected = list(df.columns + df3.columns) + list(reversed(df4.columns)) - self._check_legend_labels(ax, expected) + self._check_legend_labels(ax, labels=expected) # Secondary Y ax = df.plot(legend=True, secondary_y='b') - self._check_legend_labels(ax, ['a', 'b (right)', 'c']) + self._check_legend_labels(ax, labels=['a', 'b (right)', 'c']) ax = df2.plot(legend=False, ax=ax) - self._check_legend_labels(ax, ['a', 'b (right)', 'c']) + self._check_legend_labels(ax, labels=['a', 'b (right)', 'c']) ax = df3.plot(kind='bar', legend=True, secondary_y='h', ax=ax) - self._check_legend_labels(ax, ['a', 'b (right)', 'c', 'g', 'h (right)', 'i']) + self._check_legend_labels(ax, labels=['a', 'b (right)', 'c', 'g', 'h (right)', 'i']) # Time Series ind = date_range('1/1/2014', periods=3) @@ -1374,20 +1608,20 @@ def test_df_legend_labels(self): df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f'], index=ind) df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i'], index=ind) ax = df.plot(legend=True, secondary_y='b') - self._check_legend_labels(ax, ['a', 'b (right)', 'c']) + self._check_legend_labels(ax, labels=['a', 'b (right)', 'c']) ax = df2.plot(legend=False, ax=ax) - self._check_legend_labels(ax, ['a', 'b (right)', 
'c']) + self._check_legend_labels(ax, labels=['a', 'b (right)', 'c']) ax = df3.plot(legend=True, ax=ax) - self._check_legend_labels(ax, ['a', 'b (right)', 'c', 'g', 'h', 'i']) + self._check_legend_labels(ax, labels=['a', 'b (right)', 'c', 'g', 'h', 'i']) # scatter ax = df.plot(kind='scatter', x='a', y='b', label='data1') - self._check_legend_labels(ax, ['data1']) + self._check_legend_labels(ax, labels=['data1']) ax = df2.plot(kind='scatter', x='d', y='e', legend=False, label='data2', ax=ax) - self._check_legend_labels(ax, ['data1']) + self._check_legend_labels(ax, labels=['data1']) ax = df3.plot(kind='scatter', x='g', y='h', label='data3', ax=ax) - self._check_legend_labels(ax, ['data1', 'data3']) + self._check_legend_labels(ax, labels=['data1', 'data3']) def test_legend_name(self): multi = DataFrame(randn(4, 4), @@ -1397,25 +1631,30 @@ def test_legend_name(self): ax = multi.plot() leg_title = ax.legend_.get_title() - self.assertEqual(leg_title.get_text(), 'group,individual') + self._check_text_labels(leg_title, 'group,individual') df = DataFrame(randn(5, 5)) ax = df.plot(legend=True, ax=ax) leg_title = ax.legend_.get_title() - self.assertEqual(leg_title.get_text(), 'group,individual') + self._check_text_labels(leg_title, 'group,individual') df.columns.name = 'new' ax = df.plot(legend=False, ax=ax) leg_title = ax.legend_.get_title() - self.assertEqual(leg_title.get_text(), 'group,individual') + self._check_text_labels(leg_title, 'group,individual') ax = df.plot(legend=True, ax=ax) leg_title = ax.legend_.get_title() - self.assertEqual(leg_title.get_text(), 'new') + self._check_text_labels(leg_title, 'new') - def _check_plot_fails(self, f, *args, **kwargs): - with tm.assertRaises(Exception): - f(*args, **kwargs) + @slow + def test_no_legend(self): + kinds = 'line', 'bar', 'barh', 'kde', 'area' + df = DataFrame(rand(3, 3), columns=['a', 'b', 'c']) + + for kind in kinds: + ax = df.plot(kind=kind, legend=False) + self._check_legend_labels(ax, visible=False) @slow def 
test_style_by_column(self): @@ -1433,25 +1672,8 @@ def test_style_by_column(self): for i, l in enumerate(ax.get_lines()[:len(markers)]): self.assertEqual(l.get_marker(), markers[i]) - def check_line_colors(self, colors, lines): - for i, l in enumerate(lines): - xp = colors[i] - rs = l.get_color() - self.assertEqual(xp, rs) - - def check_collection_colors(self, colors, cols): - from matplotlib.colors import ColorConverter - conv = ColorConverter() - for i, c in enumerate(cols): - xp = colors[i] - xp = conv.to_rgba(xp) - rs = c.get_facecolor()[0] - for x, y in zip(xp, rs): - self.assertEqual(x, y) - @slow def test_line_colors(self): - import matplotlib.pyplot as plt import sys from matplotlib import cm @@ -1459,7 +1681,7 @@ def test_line_colors(self): df = DataFrame(randn(5, 5)) ax = df.plot(color=custom_colors) - self.check_line_colors(custom_colors, ax.get_lines()) + self._check_colors(ax.get_lines(), linecolors=custom_colors) tmp = sys.stderr sys.stderr = StringIO() @@ -1475,21 +1697,19 @@ def test_line_colors(self): tm.close() ax = df.plot(colormap='jet') - rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) - self.check_line_colors(rgba_colors, ax.get_lines()) - + self._check_colors(ax.get_lines(), linecolors=rgba_colors) tm.close() ax = df.plot(colormap=cm.jet) - rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) - self.check_line_colors(rgba_colors, ax.get_lines()) + self._check_colors(ax.get_lines(), linecolors=rgba_colors) + tm.close() # make color a list if plotting one column frame # handles cases like df.plot(color='DodgerBlue') - tm.close() - df.ix[:, [0]].plot(color='DodgerBlue') + ax = df.ix[:, [0]].plot(color='DodgerBlue') + self._check_colors(ax.lines, linecolors=['DodgerBlue']) @slow def test_area_colors(self): @@ -1500,21 +1720,23 @@ def test_area_colors(self): df = DataFrame(rand(5, 5)) ax = df.plot(kind='area', color=custom_colors) - self.check_line_colors(custom_colors, ax.get_lines()) + self._check_colors(ax.get_lines(), 
linecolors=custom_colors) poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] - self.check_collection_colors(custom_colors, poly) + self._check_colors(poly, facecolors=custom_colors) + tm.close() ax = df.plot(kind='area', colormap='jet') rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) - self.check_line_colors(rgba_colors, ax.get_lines()) + self._check_colors(ax.get_lines(), linecolors=rgba_colors) poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] - self.check_collection_colors(rgba_colors, poly) - + self._check_colors(poly, facecolors=rgba_colors) + tm.close() + ax = df.plot(kind='area', colormap=cm.jet) rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) - self.check_line_colors(rgba_colors, ax.get_lines()) + self._check_colors(ax.get_lines(), linecolors=rgba_colors) poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] - self.check_collection_colors(rgba_colors, poly) + self._check_colors(poly, facecolors=rgba_colors) def test_default_color_cycle(self): import matplotlib.pyplot as plt @@ -1523,11 +1745,8 @@ def test_default_color_cycle(self): df = DataFrame(randn(5, 3)) ax = df.plot() - lines = ax.get_lines() - for i, l in enumerate(lines): - xp = plt.rcParams['axes.color_cycle'][i] - rs = l.get_color() - self.assertEqual(xp, rs) + expected = plt.rcParams['axes.color_cycle'][:3] + self._check_colors(ax.get_lines(), linecolors=expected) def test_unordered_ts(self): df = DataFrame(np.array([3.0, 2.0, 1.0]), @@ -1572,9 +1791,7 @@ def test_invalid_kind(self): @slow def test_hexbin_basic(self): - df = DataFrame({"A": np.random.uniform(size=20), - "B": np.random.uniform(size=20), - "C": np.arange(20) + np.random.uniform(size=20)}) + df = self.hexbin_df ax = df.plot(kind='hexbin', x='A', y='B', gridsize=10) # TODO: need better way to test. This just does existence. 
@@ -1582,14 +1799,15 @@ def test_hexbin_basic(self): # GH 6951 axes = df.plot(x='A', y='B', kind='hexbin', subplots=True) - # hexbin should have 2 axes, 1 for plotting and another is colorbar + # hexbin should have 2 axes in the figure, 1 for plotting and another is colorbar self.assertEqual(len(axes[0].figure.axes), 2) + # return value is single axes + self._check_axes_shape(axes, axes_num=1, layout=(1, )) + @slow def test_hexbin_with_c(self): - df = DataFrame({"A": np.random.uniform(size=20), - "B": np.random.uniform(size=20), - "C": np.arange(20) + np.random.uniform(size=20)}) + df = self.hexbin_df ax = df.plot(kind='hexbin', x='A', y='B', C='C') self.assertEqual(len(ax.collections), 1) @@ -1600,9 +1818,7 @@ def test_hexbin_with_c(self): @slow def test_hexbin_cmap(self): - df = DataFrame({"A": np.random.uniform(size=20), - "B": np.random.uniform(size=20), - "C": np.arange(20) + np.random.uniform(size=20)}) + df = self.hexbin_df # Default to BuGn ax = df.plot(kind='hexbin', x='A', y='B') @@ -1614,18 +1830,14 @@ def test_hexbin_cmap(self): @slow def test_no_color_bar(self): - df = DataFrame({"A": np.random.uniform(size=20), - "B": np.random.uniform(size=20), - "C": np.arange(20) + np.random.uniform(size=20)}) + df = self.hexbin_df ax = df.plot(kind='hexbin', x='A', y='B', colorbar=None) self.assertIs(ax.collections[0].colorbar, None) @slow def test_allow_cmap(self): - df = DataFrame({"A": np.random.uniform(size=20), - "B": np.random.uniform(size=20), - "C": np.arange(20) + np.random.uniform(size=20)}) + df = self.hexbin_df ax = df.plot(kind='hexbin', x='A', y='B', cmap='YlGn') self.assertEquals(ax.collections[0].cmap.name, 'YlGn') @@ -1642,14 +1854,12 @@ def test_pie_df(self): df.plot(kind='pie') ax = _check_plot_works(df.plot, kind='pie', y='Y') - for t, expected in zip(ax.texts, df.index): - self.assertEqual(t.get_text(), expected) + self._check_text_labels(ax.texts, df.index) axes = _check_plot_works(df.plot, kind='pie', subplots=True) 
self.assertEqual(len(axes), len(df.columns)) for ax in axes: - for t, expected in zip(ax.texts, df.index): - self.assertEqual(t.get_text(), expected) + self._check_text_labels(ax.texts, df.index) for ax, ylabel in zip(axes, df.columns): self.assertEqual(ax.get_ylabel(), ylabel) @@ -1659,16 +1869,11 @@ def test_pie_df(self): labels=labels, colors=color_args) self.assertEqual(len(axes), len(df.columns)) - import matplotlib.colors as colors - conv = colors.colorConverter for ax in axes: - for t, expected in zip(ax.texts, labels): - self.assertEqual(t.get_text(), expected) - for p, expected in zip(ax.patches, color_args): - self.assertEqual(p.get_facecolor(), conv.to_rgba(expected)) + self._check_text_labels(ax.texts, labels) + self._check_colors(ax.patches, facecolors=color_args) def test_errorbar_plot(self): - d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)} df = DataFrame(d) d_err = {'x': np.ones(12)*0.2, 'y': np.ones(12)*0.4} @@ -1676,50 +1881,49 @@ def test_errorbar_plot(self): # check line plots ax = _check_plot_works(df.plot, yerr=df_err, logy=True) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) ax = _check_plot_works(df.plot, yerr=df_err, loglog=True) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) kinds = ['line', 'bar', 'barh'] for kind in kinds: ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) ax = _check_plot_works(df.plot, yerr=d_err, kind=kind) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind) - _check_has_errorbars(self, ax, xerr=2, yerr=2) + self._check_has_errorbars(ax, 
xerr=2, yerr=2) ax = _check_plot_works(df.plot, yerr=df_err['x'], xerr=df_err['x'], kind=kind) - _check_has_errorbars(self, ax, xerr=2, yerr=2) + self._check_has_errorbars(ax, xerr=2, yerr=2) ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind) - _check_has_errorbars(self, ax, xerr=2, yerr=2) + self._check_has_errorbars(ax, xerr=2, yerr=2) axes = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind) - for ax in axes: - _check_has_errorbars(self, ax, xerr=1, yerr=1) + self._check_has_errorbars(axes, xerr=1, yerr=1) ax = _check_plot_works((df+1).plot, yerr=df_err, xerr=df_err, kind='bar', log=True) - _check_has_errorbars(self, ax, xerr=2, yerr=2) + self._check_has_errorbars(ax, xerr=2, yerr=2) # yerr is raw error values ax = _check_plot_works(df['y'].plot, yerr=np.ones(12)*0.4) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(df.plot, yerr=np.ones((2, 12))*0.4) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) # yerr is iterator import itertools ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df))) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) # yerr is column name for yerr in ['yerr', u('誤差')]: s_df = df.copy() s_df[yerr] = np.ones(12)*0.2 ax = _check_plot_works(s_df.plot, yerr=yerr) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) with tm.assertRaises(ValueError): df.plot(yerr=np.random.randn(11)) @@ -1734,9 +1938,9 @@ def test_errorbar_with_integer_column_names(self): df = DataFrame(np.random.randn(10, 2)) df_err = DataFrame(np.random.randn(10, 2)) ax = _check_plot_works(df.plot, yerr=df_err) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + 
self._check_has_errorbars(ax, xerr=0, yerr=2) ax = _check_plot_works(df.plot, y=0, yerr=1) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) @slow def test_errorbar_with_partial_columns(self): @@ -1745,13 +1949,13 @@ def test_errorbar_with_partial_columns(self): kinds = ['line', 'bar'] for kind in kinds: ax = _check_plot_works(df.plot, yerr=df_err, kind=kind) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) ix = date_range('1/1/2000', periods=10, freq='M') df.set_index(ix, inplace=True) df_err.set_index(ix, inplace=True) ax = _check_plot_works(df.plot, yerr=df_err, kind='line') - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)} df = DataFrame(d) @@ -1759,7 +1963,7 @@ def test_errorbar_with_partial_columns(self): df_err = DataFrame(d_err) for err in [d_err, df_err]: ax = _check_plot_works(df.plot, yerr=err) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) @slow def test_errorbar_timeseries(self): @@ -1775,18 +1979,17 @@ def test_errorbar_timeseries(self): kinds = ['line', 'bar', 'barh'] for kind in kinds: ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind) - _check_has_errorbars(self, ax, xerr=0, yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'], kind=kind) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) - _check_has_errorbars(self, ax, xerr=0, 
yerr=2) + self._check_has_errorbars(ax, xerr=0, yerr=2) axes = _check_plot_works(tdf.plot, kind=kind, yerr=tdf_err, subplots=True) - for ax in axes: - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(axes, xerr=0, yerr=1) def test_errorbar_asymmetrical(self): @@ -1826,65 +2029,58 @@ def test_errorbar_scatter(self): index=range(5), columns=['x', 'y']) ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y') - _check_has_errorbars(self, ax, xerr=0, yerr=0) + self._check_has_errorbars(ax, xerr=0, yerr=0) ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', xerr=df_err) - _check_has_errorbars(self, ax, xerr=1, yerr=0) + self._check_has_errorbars(ax, xerr=1, yerr=0) ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', yerr=df_err) - _check_has_errorbars(self, ax, xerr=0, yerr=1) + self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', xerr=df_err, yerr=df_err) - _check_has_errorbars(self, ax, xerr=1, yerr=1) + self._check_has_errorbars(ax, xerr=1, yerr=1) @tm.mplskip -class TestDataFrameGroupByPlots(tm.TestCase): - - def setUp(self): - n = 100 - with tm.RNGContext(42): - gender = tm.choice(['Male', 'Female'], size=n) - classroom = tm.choice(['A', 'B', 'C'], size=n) - - self.hist_df = DataFrame({'gender': gender, - 'classroom': classroom, - 'height': random.normal(66, 4, size=n), - 'weight': random.normal(161, 32, size=n), - 'category': random.randint(4, size=n)}) - - def tearDown(self): - tm.close() +class TestDataFrameGroupByPlots(TestPlotBase): @slow def test_boxplot(self): - df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2']) - df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B']) - grouped = df.groupby(by='X') - _check_plot_works(grouped.boxplot) - _check_plot_works(grouped.boxplot, subplots=False) + # unable to check layout because boxplot doesn't return ndarray + # axes_num can be checked using gcf().axes + grouped = 
self.hist_df.groupby(by='gender') + box = _check_plot_works(grouped.boxplot) + self._check_axes_shape(self.plt.gcf().axes, axes_num=2) + + box = _check_plot_works(grouped.boxplot, subplots=False) + self._check_axes_shape(self.plt.gcf().axes, axes_num=2) tuples = lzip(string.ascii_letters[:10], range(10)) df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples)) + grouped = df.groupby(level=1) - _check_plot_works(grouped.boxplot) - _check_plot_works(grouped.boxplot, subplots=False) + box = _check_plot_works(grouped.boxplot) + self._check_axes_shape(self.plt.gcf().axes, axes_num=10) + + box = _check_plot_works(grouped.boxplot, subplots=False) + self._check_axes_shape(self.plt.gcf().axes, axes_num=10) grouped = df.unstack(level=1).groupby(level=0, axis=1) - _check_plot_works(grouped.boxplot) - _check_plot_works(grouped.boxplot, subplots=False) + box = _check_plot_works(grouped.boxplot) + self._check_axes_shape(self.plt.gcf().axes, axes_num=3) + + box = _check_plot_works(grouped.boxplot, subplots=False) + self._check_axes_shape(self.plt.gcf().axes, axes_num=3) def test_series_plot_color_kwargs(self): # GH1890 ax = Series(np.arange(12) + 1).plot(color='green') - line = ax.get_lines()[0] - self.assertEqual(line.get_color(), 'green') + self._check_colors(ax.get_lines(), linecolors=['green']) def test_time_series_plot_color_kwargs(self): # #1890 ax = Series(np.arange(12) + 1, index=date_range( '1/1/2000', periods=12)).plot(color='green') - line = ax.get_lines()[0] - self.assertEqual(line.get_color(), 'green') + self._check_colors(ax.get_lines(), linecolors=['green']) def test_time_series_plot_color_with_empty_kwargs(self): import matplotlib as mpl @@ -1897,25 +2093,18 @@ def test_time_series_plot_color_with_empty_kwargs(self): for i in range(ncolors): ax = s.plot() - - line_colors = [l.get_color() for l in ax.get_lines()] - self.assertEqual(line_colors, def_colors[:ncolors]) + self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors]) @slow def 
test_grouped_hist(self): - import matplotlib.pyplot as plt df = DataFrame(randn(500, 2), columns=['A', 'B']) df['C'] = np.random.randint(0, 4, 500) axes = plotting.grouped_hist(df.A, by=df.C) - self.assertEqual(len(axes.ravel()), 4) + self._check_axes_shape(axes, axes_num=4, layout=(2, 2), figsize=(10, 5)) tm.close() axes = df.hist(by=df.C) - self.assertEqual(axes.ndim, 2) - self.assertEqual(len(axes.ravel()), 4) - - for ax in axes.ravel(): - self.assert_(len(ax.patches) > 0) + self._check_axes_shape(axes, axes_num=4, layout=(2, 2), figsize=(10, 5)) tm.close() # make sure kwargs to hist are handled @@ -1930,87 +2119,46 @@ def test_grouped_hist(self): tm.close() axes = plotting.grouped_hist(df.A, by=df.C, log=True) # scale of y must be 'log' - for ax in axes.ravel(): - self.assertEqual(ax.get_yscale(), 'log') + self._check_ax_scales(axes, yaxis='log') tm.close() # propagate attr exception from matplotlib.Axes.hist with tm.assertRaises(AttributeError): plotting.grouped_hist(df.A, by=df.C, foo='bar') - def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=(8.0, 6.0)): - """ - Check expected number of axes is drawn in expected layout - - Parameters - ---------- - axes : matplotlib Axes object, or its list-like - axes_num : number - expected number of axes. Unnecessary axes should be set to invisible. - layout : tuple - expected layout - figsize : tuple - expected figsize. 
default is matplotlib default - """ - visible_axes = self._flatten_visible(axes) - - if axes_num is not None: - self.assertEqual(len(visible_axes), axes_num) - for ax in visible_axes: - # check something drawn on visible axes - self.assert_(len(ax.get_children()) > 0) - - if layout is not None: - if isinstance(axes, list): - self.assertEqual((len(axes), ), layout) - elif isinstance(axes, np.ndarray): - self.assertEqual(axes.shape, layout) - else: - # in case of AxesSubplot - self.assertEqual((1, ), layout) - - self.assert_numpy_array_equal(np.round(visible_axes[0].figure.get_size_inches()), - np.array(figsize)) - - def _flatten_visible(self, axes): - axes = plotting._flatten(axes) - axes = [ax for ax in axes if ax.get_visible()] - return axes - @slow def test_grouped_box_layout(self): - import matplotlib.pyplot as plt df = self.hist_df - self.assertRaises(ValueError, df.boxplot, column=['weight', 'height'], by=df.gender, - layout=(1, 1)) + self.assertRaises(ValueError, df.boxplot, column=['weight', 'height'], + by=df.gender, layout=(1, 1)) self.assertRaises(ValueError, df.boxplot, column=['height', 'weight', 'category'], layout=(2, 1)) box = _check_plot_works(df.groupby('gender').boxplot, column='height') - self._check_axes_shape(plt.gcf().axes, axes_num=2) + self._check_axes_shape(self.plt.gcf().axes, axes_num=2) box = _check_plot_works(df.groupby('category').boxplot, column='height') - self._check_axes_shape(plt.gcf().axes, axes_num=4) + self._check_axes_shape(self.plt.gcf().axes, axes_num=4) # GH 6769 box = _check_plot_works(df.groupby('classroom').boxplot, column='height') - self._check_axes_shape(plt.gcf().axes, axes_num=3) + self._check_axes_shape(self.plt.gcf().axes, axes_num=3) box = df.boxplot(column=['height', 'weight', 'category'], by='gender') - self._check_axes_shape(plt.gcf().axes, axes_num=3) + self._check_axes_shape(self.plt.gcf().axes, axes_num=3) box = df.groupby('classroom').boxplot(column=['height', 'weight', 'category']) - 
self._check_axes_shape(plt.gcf().axes, axes_num=3) + self._check_axes_shape(self.plt.gcf().axes, axes_num=3) box = _check_plot_works(df.groupby('category').boxplot, column='height', layout=(3, 2)) - self._check_axes_shape(plt.gcf().axes, axes_num=4) + self._check_axes_shape(self.plt.gcf().axes, axes_num=4) box = df.boxplot(column=['height', 'weight', 'category'], by='gender', layout=(4, 1)) - self._check_axes_shape(plt.gcf().axes, axes_num=3) + self._check_axes_shape(self.plt.gcf().axes, axes_num=3) box = df.groupby('classroom').boxplot(column=['height', 'weight', 'category'], layout=(1, 4)) - self._check_axes_shape(plt.gcf().axes, axes_num=3) + self._check_axes_shape(self.plt.gcf().axes, axes_num=3) @slow def test_grouped_hist_layout(self): @@ -2029,6 +2177,7 @@ def test_grouped_hist_layout(self): axes = _check_plot_works(df.hist, column='height', by=df.category, layout=(4, 2), figsize=(12, 8)) + self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 8)) # GH 6769 @@ -2115,21 +2264,6 @@ def assert_is_valid_plot_return_object(objs): ''.format(objs.__class__.__name__)) -def _check_has_errorbars(t, ax, xerr=0, yerr=0): - containers = ax.containers - xerr_count = 0 - yerr_count = 0 - for c in containers: - has_xerr = getattr(c, 'has_xerr', False) - has_yerr = getattr(c, 'has_yerr', False) - if has_xerr: - xerr_count += 1 - if has_yerr: - yerr_count += 1 - t.assertEqual(xerr, xerr_count) - t.assertEqual(yerr, yerr_count) - - def _check_plot_works(f, *args, **kwargs): import matplotlib.pyplot as plt ret = None
Create base class for plotting related test, and organized data creation and validation function.
https://api.github.com/repos/pandas-dev/pandas/pulls/7036
2014-05-05T01:15:25Z
2014-05-11T18:05:56Z
2014-05-11T18:05:56Z
2014-07-16T09:04:29Z
TST: nose.SkipTest on RemoteDataErrors in tests for io.data.Options
diff --git a/doc/source/release.rst b/doc/source/release.rst index 5b51100c28c71..9bfc3609f5b6d 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -457,6 +457,7 @@ Bug Fixes - Bug causing UnicodeEncodeError when get_dummies called with unicode values and a prefix (:issue:`6885`) - Bug in timeseries-with-frequency plot cursor display (:issue:`5453`) - Bug surfaced in groupby.plot when using a ``Float64Index`` (:issue:`7025`) +- Stopped tests from failing if options data isn't able to be downloaded from Yahoo (:issue:`7034`) pandas 0.13.1 ------------- diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index a7d92d41eec15..8f98806d1ad59 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import DataFrame from pandas.io import data as web -from pandas.io.data import DataReader, SymbolWarning +from pandas.io.data import DataReader, SymbolWarning, RemoteDataError from pandas.util.testing import (assert_series_equal, assert_produces_warning, network, assert_frame_equal) import pandas.util.testing as tm @@ -252,8 +252,8 @@ def tearDownClass(cls): def test_get_options_data(self): try: calls, puts = self.aapl.get_options_data(expiry=self.expiry) - except IndexError: - warnings.warn("IndexError thrown no tables found") + except RemoteDataError as e: + nose.SkipTest(e) else: assert len(calls)>1 assert len(puts)>1 @@ -269,8 +269,8 @@ def test_get_near_stock_price(self): try: calls, puts = self.aapl.get_near_stock_price(call=True, put=True, expiry=self.expiry) - except IndexError: - warnings.warn("IndexError thrown no tables found") + except RemoteDataError as e: + nose.SkipTest(e) else: self.assertEqual(len(calls), 5) self.assertEqual(len(puts), 5) @@ -279,8 +279,8 @@ def test_get_near_stock_price(self): def test_get_call_data(self): try: calls = self.aapl.get_call_data(expiry=self.expiry) - except IndexError: - warnings.warn("IndexError thrown no tables found") + 
except RemoteDataError as e: + nose.SkipTest(e) else: assert len(calls)>1 @@ -288,8 +288,8 @@ def test_get_call_data(self): def test_get_put_data(self): try: puts = self.aapl.get_put_data(expiry=self.expiry) - except IndexError: - warnings.warn("IndexError thrown no tables found") + except RemoteDataError as e: + nose.SkipTest(e) else: assert len(puts)>1 @@ -321,8 +321,8 @@ def test_get_options_data_warning(self): print('month: {0}, year: {1}'.format(self.month, self.year)) try: self.aapl.get_options_data(month=self.month, year=self.year) - except IndexError: - warnings.warn("IndexError thrown no tables found") + except RemoteDataError as e: + nose.SkipTest(e) @network def test_get_near_stock_price_warning(self): @@ -333,8 +333,8 @@ def test_get_near_stock_price_warning(self): put=True, month=self.month, year=self.year) - except IndexError: - warnings.warn("IndexError thrown no tables found") + except RemoteDataError as e: + nose.SkipTest(e) @network def test_get_call_data_warning(self): @@ -342,8 +342,8 @@ def test_get_call_data_warning(self): print('month: {0}, year: {1}'.format(self.month, self.year)) try: self.aapl.get_call_data(month=self.month, year=self.year) - except IndexError: - warnings.warn("IndexError thrown no tables found") + except RemoteDataError as e: + nose.SkipTest(e) @network def test_get_put_data_warning(self): @@ -351,8 +351,8 @@ def test_get_put_data_warning(self): print('month: {0}, year: {1}'.format(self.month, self.year)) try: self.aapl.get_put_data(month=self.month, year=self.year) - except IndexError: - warnings.warn("IndexError thrown no tables found") + except RemoteDataError as e: + nose.SkipTest(e) class TestDataReader(tm.TestCase):
Prevents tests from failing if data isn't able to be downloaded from Yahoo Finance.
https://api.github.com/repos/pandas-dev/pandas/pulls/7034
2014-05-04T23:08:12Z
2014-05-05T10:00:59Z
2014-05-05T10:00:59Z
2014-06-16T05:08:36Z
BUG: fix reading multi-index data in python parser
diff --git a/doc/source/release.rst b/doc/source/release.rst index 064fd3cf12b2f..b5a11091779ec 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -457,6 +457,7 @@ Bug Fixes - accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`) - Bug in C parser with leading whitespace (:issue:`3374`) - Bug in C parser with ``delim_whitespace=True`` and ``\r``-delimited lines +- Bug in python parser with explicit multi-index in row following column header (:issue:`6893`) - Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`) - Bug in ``DataFrame.apply`` with functions that used \*args`` or \*\*kwargs and returned an empty result (:issue:`6952`) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index b439ca5c61aeb..4898fabfcd2b4 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1383,7 +1383,7 @@ def __init__(self, f, **kwds): # multiple date column thing turning into a real spaghetti factory if not self._has_complex_date_col: (index_names, - self.orig_names, columns_) = self._get_index_name(self.columns) + self.orig_names, self.columns) = self._get_index_name(self.columns) self._name_processed = True if self.index_names is None: self.index_names = index_names @@ -1811,8 +1811,9 @@ def _get_index_name(self, columns): columns.insert(0, c) # Update list of original names to include all indices. 
- self.num_original_columns = len(next_line) - return line, columns, orig_names + orig_names = list(columns) + self.num_original_columns = len(columns) + return line, orig_names, columns if implicit_first_cols > 0: # Case 1 @@ -1824,7 +1825,7 @@ def _get_index_name(self, columns): else: # Case 2 - (index_name, columns, + (index_name, columns_, self.index_col) = _clean_index_names(columns, self.index_col) return index_name, orig_names, columns diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index 872e719eaa630..2a31eb9608001 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -1569,7 +1569,7 @@ def test_converter_return_string_bug(self): def test_read_table_buglet_4x_multiindex(self): # GH 6607 - # Parsing multiindex columns currently causes an error in the C parser. + # Parsing multi-level index currently causes an error in the C parser. # Temporarily copied to TestPythonParser. # Here test that CParserError is raised: @@ -2692,7 +2692,7 @@ def test_decompression_regex_sep(self): def test_read_table_buglet_4x_multiindex(self): # GH 6607 # This is a copy which should eventually be merged into ParserTests - # when the issue with multiindex columns is fixed in the C parser. + # when the issue with multi-level index is fixed in the C parser. text = """ A B C D E one two three four @@ -2704,6 +2704,13 @@ def test_read_table_buglet_4x_multiindex(self): df = self.read_table(StringIO(text), sep='\s+') self.assertEquals(df.index.names, ('one', 'two', 'three', 'four')) + # GH 6893 + data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9' + expected = DataFrame.from_records([(1,3,7,0,3,6), (3,1,4,1,5,9)], + columns=list('abcABC'), index=list('abc')) + actual = self.read_table(StringIO(data), sep='\s+') + tm.assert_frame_equal(actual, expected) + class TestFwfColspaceSniffing(tm.TestCase): def test_full_file(self): # File with all values
partial fix for #6893 The python parser has a problem reading data with a multi-index specified in the row following the header, for example ``` python In [3]: text = """ A B C D E one two three four a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" In [4]: pd.read_table(StringIO(text), sep='\s+', engine='python') Out[4]: E one two three four a b 10.0032 5 0.3640 q 20.0000 4 0.1744 x q 30.0000 3 2.5838 [3 rows x 1 columns] ``` (the C parser doesn't make it this far, see #6893) This PR fixes the bug in the python parser: ``` python In [4]: pd.read_table(StringIO(text), sep='\s+', engine='python') Out[4]: A B C D E one two three four a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 q 20.0000 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30.0000 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838 [3 rows x 5 columns] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7029
2014-05-03T20:04:50Z
2014-05-06T18:02:22Z
2014-05-06T18:02:22Z
2014-06-23T23:28:38Z
DOC: Visualization reorganization
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index c1b7cf30067e3..fb0a0a0802bda 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -18,23 +18,40 @@ tools in the PyData space. We'd like to make it easier for users to find these project, if you know of other substantial projects that you feel should be on this list, please let us know. +.. _ecosystem.stats: + +Statistics and Machine Learning +------------------------------- + `Statsmodels <http://statsmodels.sourceforge.net>`__ ----------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Statsmodels is the prominent python "statistics and econometrics library" and it has a long-standing special relationship with pandas. Statsmodels provides powerful statistics, econometrics, analysis and modeling functionality that is out of pandas' scope. Statsmodels leverages pandas objects as the underlying data container for computation. +`sklearn-pandas <https://github.com/paulgb/sklearn-pandas>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Use pandas DataFrames in your scikit-learn ML pipeline. + + + +.. _ecosystem.visualization: + +Visualization +------------- + `Vincent <https://github.com/wrobstory/vincent>`__ --------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The `Vincent <https://github.com/wrobstory/vincent>`__ project leverages `Vega <https://github.com/trifacta/vega>`__ (that in turn, leverages `d3 <http://d3js.org/>`__) to create plots . It has great support for pandas data objects. `yhat/ggplot <https://github.com/yhat/ggplot>`__ ------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hadley Wickham's `ggplot2 <http://ggplot2.org/>`__ is a foundational exploratory visualization package for the R language. 
Based on `"The Grammer of Graphics" <http://www.cs.uic.edu/~wilkinson/TheGrammarOfGraphics/GOG.html>`__ it @@ -44,9 +61,8 @@ but a faithful implementation for python users has long been missing. Although s (as of Jan-2014), the `yhat/ggplot <https://github.com/yhat/ggplot>`__ project has been progressing quickly in that direction. - `Seaborn <https://github.com/mwaskom/seaborn>`__ ------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Although pandas has quite a bit of "just plot it" functionality built-in, visualization and in particular statistical graphics is a vast field with a long tradition and lots of ground @@ -54,17 +70,23 @@ to cover. The `Seaborn <https://github.com/mwaskom/seaborn>`__ project builds on and `matplotlib <http://matplotlib.org>`__ to provide easy plotting of data which extends to more advanced types of plots then those offered by pandas. +`Bokeh <http://bokeh.pydata.org>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Bokeh is a Python interactive visualization library for large datasets that natively uses +the latest web technologies. Its goal is to provide elegant, concise construction of novel +graphics in the style of Protovis/D3, while delivering high-performance interactivity over +large data to thin clients. + +.. _ecosystem.domain: + +Domain Specific +--------------- `Geopandas <https://github.com/kjordahl/geopandas>`__ ------------------------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Geopandas extends pandas data objects to include geographic information which support geometric operations. If your work entails maps and geographical coordinates, and you love pandas, you should take a close look at Geopandas. -`sklearn-pandas <https://github.com/paulgb/sklearn-pandas>`__ -------------------------------------------------------------- - -Use pandas DataFrames in your scikit-learn ML pipeline. 
- - diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 255acad7f927b..fbc0a9005d50d 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -17,14 +17,18 @@ options.display.max_rows = 15 from pandas.compat import lrange -************************ -Plotting with matplotlib -************************ +.. note:: + + All calls to ``np.random`` are seeded with 123456. .. note:: - We intend to build more plotting integration with `matplotlib - <http://matplotlib.sourceforge.net>`__ as time goes on. + See :ref:`ecosystem <ecosystem.visualization>` for visualization libraries + that go beyond the basics included in pandas. + +******** +Plotting +******** We use the standard convention for referencing the matplotlib API: @@ -32,7 +36,9 @@ We use the standard convention for referencing the matplotlib API: import matplotlib.pyplot as plt -The ``display.mpl_style`` option was added in 0.11.0, to produce more appealing plots. +.. versionadded:: 0.11.0 + +The ``display.mpl_style`` produces more appealing plots. When set, matplotlib's ``rcParams`` are changed (globally!) to nicer-looking settings. All the plots in the documentation are rendered with this option set to the 'default' style. @@ -43,13 +49,18 @@ All the plots in the documentation are rendered with this option set to the .. _visualization.basic: -Basic plotting: ``plot`` +Basic Plotting: ``plot`` ------------------------ See the :ref:`cookbook<cookbook.plotting>` for some advanced strategies The ``plot`` method on Series and DataFrame is just a simple wrapper around -``plt.plot``: +:meth:`plt.plot() <matplotlib.axes.Axes.plot>`: + +.. ipython:: python + :suppress: + + np.random.seed(123456) .. ipython:: python @@ -59,16 +70,15 @@ The ``plot`` method on Series and DataFrame is just a simple wrapper around @savefig series_plot_basic.png ts.plot() -If the index consists of dates, it calls ``gcf().autofmt_xdate()`` to try to -format the x-axis nicely as per above. 
The method takes a number of arguments -for controlling the look of the plot: +If the index consists of dates, it calls :meth:`gcf().autofmt_xdate() <matplotlib.figure.Figure.autofmt_xdate>` +to try to format the x-axis nicely as per above. -.. ipython:: python +On DataFrame, :meth:`~DataFrame.plot` is a convenience to plot all of the columns with labels: - @savefig series_plot_basic2.png - plt.figure(); ts.plot(style='k--', label='Series'); +.. ipython:: python + :suppress: -On DataFrame, ``plot`` is a convenience to plot all of the columns with labels: + np.random.seed(123456) .. ipython:: python @@ -78,39 +88,16 @@ On DataFrame, ``plot`` is a convenience to plot all of the columns with labels: @savefig frame_plot_basic.png plt.figure(); df.plot(); -You may set the ``legend`` argument to ``False`` to hide the legend, which is -shown by default. - -.. ipython:: python - - @savefig frame_plot_basic_noleg.png - df.plot(legend=False) - -Some other options are available, like plotting each Series on a different axis: - -.. ipython:: python - - @savefig frame_plot_subplots.png - df.plot(subplots=True, figsize=(6, 6)); - -You may pass ``logy`` to get a log-scale Y axis. - -.. ipython:: python - - plt.figure(); - - ts = Series(randn(1000), index=date_range('1/1/2000', periods=1000)) - ts = np.exp(ts.cumsum()) - - @savefig series_plot_logy.png - ts.plot(logy=True) - You can plot one column versus another using the `x` and `y` keywords in -`DataFrame.plot`: +:meth:`~DataFrame.plot`: .. ipython:: python + :suppress: plt.figure() + np.random.seed(123456) + +.. ipython:: python df3 = DataFrame(randn(1000, 2), columns=['B', 'C']).cumsum() df3['A'] = Series(list(range(len(df)))) @@ -118,128 +105,43 @@ You can plot one column versus another using the `x` and `y` keywords in @savefig df_plot_xy.png df3.plot(x='A', y='B') +.. note:: -Plotting on a Secondary Y-axis -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To plot data on a secondary y-axis, use the ``secondary_y`` keyword: - -.. 
ipython:: python - - plt.figure() - - df.A.plot() - - @savefig series_plot_secondary_y.png - df.B.plot(secondary_y=True, style='g') - - -Selective Plotting on Secondary Y-axis -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To plot some columns in a DataFrame, give the column names to the ``secondary_y`` -keyword: - -.. ipython:: python - - plt.figure() - ax = df.plot(secondary_y=['A', 'B']) - ax.set_ylabel('CD scale') - @savefig frame_plot_secondary_y.png - ax.right_ax.set_ylabel('AB scale') - - - -Note that the columns plotted on the secondary y-axis is automatically marked -with "(right)" in the legend. To turn off the automatic marking, use the -``mark_right=False`` keyword: - -.. ipython:: python - - plt.figure() - - @savefig frame_plot_secondary_y_no_right.png - df.plot(secondary_y=['A', 'B'], mark_right=False) - - -Suppressing tick resolution adjustment -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Pandas includes automatically tick resolution adjustment for regular frequency -time-series data. For limited cases where pandas cannot infer the frequency -information (e.g., in an externally created ``twinx``), you can choose to -suppress this behavior for alignment purposes. - -Here is the default behavior, notice how the x-axis tick labelling is performed: - -.. ipython:: python - - plt.figure() - - @savefig ser_plot_suppress.png - df.A.plot() - - -Using the ``x_compat`` parameter, you can suppress this behavior: - -.. ipython:: python - - plt.figure() - - @savefig ser_plot_suppress_parm.png - df.A.plot(x_compat=True) - - -If you have more than one plot that needs to be suppressed, the ``use`` method -in ``pandas.plot_params`` can be used in a `with statement`: - -.. 
ipython:: python - - import pandas as pd - - plt.figure() - - @savefig ser_plot_suppress_context.png - with pd.plot_params.use('x_compat', True): - df.A.plot(color='r') - df.B.plot(color='g') - df.C.plot(color='b') - - -Targeting different subplots -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can pass an ``ax`` argument to ``Series.plot`` to plot on a particular axis: - -.. ipython:: python - :suppress: - - ts = Series(randn(1000), index=date_range('1/1/2000', periods=1000)) - ts = ts.cumsum() + For more formatting and sytling options, see :ref:`below <visualization.formatting>`. - df = DataFrame(randn(1000, 4), index=ts.index, columns=list('ABCD')) - df = df.cumsum() -.. ipython:: python +.. _visualization.other: - fig, axes = plt.subplots(nrows=2, ncols=2) - df['A'].plot(ax=axes[0,0]); axes[0,0].set_title('A') - df['B'].plot(ax=axes[0,1]); axes[0,1].set_title('B') - df['C'].plot(ax=axes[1,0]); axes[1,0].set_title('C') +Other Plots +----------- - @savefig series_plot_multi.png - df['D'].plot(ax=axes[1,1]); axes[1,1].set_title('D') +The ``kind`` keyword argument of :meth:`~DataFrame.plot` accepts +a handful of values for plots other than the default Line plot. +These include: -.. ipython:: python - :suppress: +* :ref:`'bar' <visualization.barplot>` or ``'barh'`` for bar plots, +* :ref:`'kde' <visualization.kde>` or ``'density'`` for density plots, +* :ref:`'area' <visualization.area_plot>` for area plots, +* :ref:`'scatter' <visualization.scatter_matrix>` for scatter plots, and +* :ref:`'hexbin' <visualization.hexbin>` for hexagonal bin plots. - plt.close('all') +In addition to these ``kind`` s, there are the :ref:`DataFrame.hist() <visualization.hist>`, +and :ref:`DataFrame.boxplot() <visualization.box>` methods, which use a separate interface. +Finally, there are several :ref:`plotting functions <visualization.tools>` in ``pandas.tools.plotting`` +that take a :class:`Series` or :class:`DataFrame` as an argument. These +include -.. 
_visualization.other: +* :ref:`Scatter Matrix <visualization.scatter_matrix>` +* :ref:`Andrews Curves <visualization.andrews_curves>`, +* :ref:`Parallel Coordinates <visualization.parallel_coordinates>`, +* :ref:`Lag Plot <visualization.lag>`, +* :ref:`Autocorrelation Plot <visualization.autocorrelation>`, +* :ref:`Bootstrap Plot <visualization.bootstrap>`, and +* :ref:`RadViz <visualization.radviz>`. -Other plotting features ------------------------ +Plots may also be adorned with :ref:`errorbars <visualization.errorbars>` +or :ref:`tables <visualization.table>`. .. _visualization.barplot: @@ -255,13 +157,14 @@ For labeled, non-time series data, you may wish to produce a bar plot: @savefig bar_plot_ex.png df.ix[5].plot(kind='bar'); plt.axhline(0, color='k') -Calling a DataFrame's ``plot`` method with ``kind='bar'`` produces a multiple +Calling a DataFrame's :meth:`~DataFrame.plot` method with ``kind='bar'`` produces a multiple bar plot: .. ipython:: python :suppress: plt.figure() + np.random.seed(123456) .. ipython:: python @@ -294,6 +197,8 @@ To get horizontal bar plots, pass ``kind='barh'``: @savefig barh_plot_stacked_ex.png df2.plot(kind='barh', stacked=True); +.. _visualization.hist: + Histograms ~~~~~~~~~~ .. ipython:: python @@ -304,7 +209,7 @@ Histograms df['A'].diff().hist() -For a DataFrame, ``hist`` plots the histograms of the columns on multiple +:meth:`DataFrame.hist` plots the histograms of the columns on multiple subplots: .. ipython:: python @@ -315,12 +220,15 @@ subplots: df.diff().hist(color='k', alpha=0.5, bins=50) -New since 0.10.0, the ``by`` keyword can be specified to plot grouped histograms: +.. versionadded:: 0.10.0 + +The ``by`` keyword can be specified to plot grouped histograms: .. ipython:: python :suppress: plt.figure() + np.random.seed(123456) .. ipython:: python @@ -332,15 +240,20 @@ New since 0.10.0, the ``by`` keyword can be specified to plot grouped histograms .. 
_visualization.box: -Box-Plotting -~~~~~~~~~~~~ +Box Plots +~~~~~~~~~ -DataFrame has a ``boxplot`` method which allows you to visualize the +DataFrame has a :meth:`~DataFrame.boxplot` method that allows you to visualize the distribution of values within each column. For instance, here is a boxplot representing five trials of 10 observations of a uniform random variable on [0,1). +.. ipython:: python + :suppress: + + np.random.seed(123456) + .. ipython:: python df = DataFrame(rand(10,5)) @@ -352,6 +265,11 @@ a uniform random variable on [0,1). You can create a stratified boxplot using the ``by`` keyword argument to create groupings. For instance, +.. ipython:: python + :suppress: + + np.random.seed(123456) + .. ipython:: python df = DataFrame(rand(10,2), columns=['Col1', 'Col2'] ) @@ -365,6 +283,11 @@ groupings. For instance, You can also pass a subset of columns to plot, as well as group by multiple columns: +.. ipython:: python + :suppress: + + np.random.seed(123456) + .. ipython:: python df = DataFrame(rand(10,3), columns=['Col1', 'Col2', 'Col3']) @@ -381,168 +304,154 @@ columns: plt.close('all') -.. _visualization.errorbars: +.. _visualization.area_plot: -Plotting With Error Bars -~~~~~~~~~~~~~~~~~~~~~~~~ +Area Plot +~~~~~~~~~ .. versionadded:: 0.14 -Plotting with error bars is now supported in the ``.plot`` method of ``DataFrame`` and ``Series`` objects. +You can create area plots with ``Series.plot`` and ``DataFrame.plot`` by passing ``kind='area'``. Area plots are stacked by default. To produce stacked area plot, each column must be either all positive or all negative values. + +When input data contains `NaN`, it will be automatically filled by 0. If you want to drop or fill by different values, use :func:`dataframe.dropna` or :func:`dataframe.fillna` before calling `plot`. + +.. 
ipython:: python + :suppress: -x and y errorbars are supported and be supplied using the ``xerr`` and ``yerr`` keyword arguments to ``.plot()`` The error values can be specified using a variety of formats. + np.random.seed(123456) + plt.figure() -- As a ``DataFrame`` or ``dict`` of errors with column names matching the ``columns`` attribute of the plotting ``DataFrame`` or matching the ``name`` attribute of the ``Series`` -- As a ``str`` indicating which of the columns of plotting ``DataFrame`` contain the error values -- As list-like raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting ``DataFrame``/``Series`` -- As float. The error value will be applied to all data. +.. ipython:: python -Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``M`` length ``Series``, a ``Mx2`` array should be provided indicating lower and upper (or left and right) errors. For a ``MxN`` ``DataFrame``, asymmetrical errors should be in a ``Mx2xN`` array. + df = DataFrame(rand(10, 4), columns=['a', 'b', 'c', 'd']) -**Note**: Plotting ``xerr`` is not supported in time series. + @savefig area_plot_stacked.png + df.plot(kind='area'); -Here is an example of one way to easily plot group means with standard deviations from the raw data. +To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5 unless otherwise specified: .. ipython:: python + :suppress: - # Generate the data - ix3 = pd.MultiIndex.from_arrays([['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'], ['foo', 'foo', 'bar', 'bar', 'foo', 'foo', 'bar', 'bar']], names=['letter', 'word']) - df3 = pd.DataFrame({'data1': [3, 2, 4, 3, 2, 4, 3, 2], 'data2': [6, 5, 7, 5, 4, 5, 6, 5]}, index=ix3) + plt.figure() - # Group by index labels and take the means and standard deviations for each group - gp3 = df3.groupby(level=('letter', 'word')) - means = gp3.mean() - errors = gp3.std() - means - errors +.. 
ipython:: python - # Plot - fig, ax = plt.subplots() - @savefig errorbar_example.png - means.plot(yerr=errors, ax=ax, kind='bar') + @savefig area_plot_unstacked.png + df.plot(kind='area', stacked=False); -.. _visualization.table: +.. _visualization.hexbin: -Plotting With Table -~~~~~~~~~~~~~~~~~~~~~~~~ +Hexagonal Bin Plot +~~~~~~~~~~~~~~~~~~ .. versionadded:: 0.14 -Plotting with matplotlib table is now supported in the ``DataFrame.plot`` and ``Series.plot`` by a ``table`` keyword. The ``table`` keyword can accept ``bool``, ``DataFrame`` or ``Series``. The simple way to draw a table is to specify ``table=True``. Data will be transposed to meet matplotlib's default layout. +You can create hexagonal bin plots with :meth:`DataFrame.plot` and +``kind='hexbin'``. +Hexbin plots can be a useful alternative to scatter plots if your data are +too dense to plot each point individually. .. ipython:: python + :suppress: - fig, ax = plt.subplots(1, 1) - df = DataFrame(rand(5, 3), columns=['a', 'b', 'c']) - ax.get_xaxis().set_visible(False) # Hide Ticks + plt.figure() + np.random.seed(123456) - @savefig line_plot_table_true.png - df.plot(table=True, ax=ax) +.. ipython:: python -Also, you can pass different ``DataFrame`` or ``Series`` for ``table`` keyword. The data will be drawn as displayed in print method (not transposed automatically). If required, it should be transposed manually as below example. + df = DataFrame(randn(1000, 2), columns=['a', 'b']) + df['b'] = df['b'] = df['b'] + np.arange(1000) -.. ipython:: python + @savefig hexbin_plot.png + df.plot(kind='hexbin', x='a', y='b', gridsize=25) - fig, ax = plt.subplots(1, 1) - ax.get_xaxis().set_visible(False) # Hide Ticks - @savefig line_plot_table_data.png - df.plot(table=np.round(df.T, 2), ax=ax) +A useful keyword argument is ``gridsize``; it controls the number of hexagons +in the x-direction, and defaults to 100. A larger ``gridsize`` means more, smaller +bins. 
-Finally, there is a helper function ``pandas.tools.plotting.table`` to create a table from ``DataFrame`` and ``Series``, and add it to an ``matplotlib.Axes``. This function can accept keywords which matplotlib table has. +By default, a histogram of the counts around each ``(x, y)`` point is computed. +You can specify alternative aggregations by passing values to the ``C`` and +``reduce_C_function`` arguments. ``C`` specifies the value at each ``(x, y)`` point +and ``reduce_C_function`` is a function of one argument that reduces all the +values in a bin to a single number (e.g. ``mean``, ``max``, ``sum``, ``std``). In this +example the positions are given by columns ``a`` and ``b``, while the value is +given by column ``z``. The bins are aggregated with numpy's ``max`` function. .. ipython:: python + :suppress: - from pandas.tools.plotting import table - fig, ax = plt.subplots(1, 1) - - table(ax, np.round(df.describe(), 2), - loc='upper right', colWidths=[0.2, 0.2, 0.2]) + plt.figure() + np.random.seed(123456) - @savefig line_plot_table_describe.png - df.plot(ax=ax, ylim=(0, 2), legend=None) +.. ipython:: python -**Note**: You can get table instances on the axes using ``axes.tables`` property for further decorations. See the `matplotlib table documenation <http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ for more. - -.. _visualization.area_plot: - -Area plot -~~~~~~~~~~~~~~~~~~~ - -.. versionadded:: 0.14 + df = DataFrame(randn(1000, 2), columns=['a', 'b']) + df['b'] = df['b'] = df['b'] + np.arange(1000) + df['z'] = np.random.uniform(0, 3, 1000) -You can create area plots with ``Series.plot`` and ``DataFrame.plot`` by passing ``kind='area'``. Area plots are stacked by default. To produce stacked area plot, each column must be either all positive or all negative values. 
+ @savefig hexbin_plot_agg.png + df.plot(kind='hexbin', x='a', y='b', C='z', reduce_C_function=np.max, + gridsize=25) -When input data contains `NaN`, it will be automatically filled by 0. If you want to drop or fill by different values, use :func:`dataframe.dropna` or :func:`dataframe.fillna` before calling `plot`. -.. ipython:: python - :suppress: +See the :meth:`hexbin <matplotlib.axes.Axes.hexbin>` method and the +`matplotlib hexbin documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hexbin>`__ for more. - plt.figure(); +.. _visualization.pie: -.. ipython:: python - - df = DataFrame(rand(10, 4), columns=['a', 'b', 'c', 'd']) +Pie plot +~~~~~~~~ - @savefig area_plot_stacked.png - df.plot(kind='area'); +.. versionadded:: 0.14 -To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5 unless otherwise specified: +You can create a pie plot with :meth:`DataFrame.plot` or :meth:`Series.plot` with ``kind='pie'``. +If your data includes any ``NaN``, they will be automatically filled with 0. +A ``ValueError`` will be raised if there are any negative values in your data. .. ipython:: python :suppress: - plt.figure(); - -.. ipython:: python - - @savefig area_plot_unstacked.png - df.plot(kind='area', stacked=False); - -.. _visualization.scatter_matrix: - -Scatter plot matrix -~~~~~~~~~~~~~~~~~~~ - -*New in 0.7.3.* You can create a scatter plot matrix using the - ``scatter_matrix`` method in ``pandas.tools.plotting``: + np.random.seed(123456) + plt.figure() .. ipython:: python - from pandas.tools.plotting import scatter_matrix - df = DataFrame(randn(1000, 4), columns=['a', 'b', 'c', 'd']) - - @savefig scatter_matrix_kde.png - scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde') + series = Series(3 * rand(4), index=['a', 'b', 'c', 'd'], name='series') -.. 
_visualization.kde: + @savefig series_pie_plot.png + series.plot(kind='pie') -*New in 0.8.0* You can create density plots using the Series/DataFrame.plot and -setting ``kind='kde'``: +Note that pie plot with :class:`DataFrame` requires that you either specify a target column by the ``y`` +argument or ``subplots=True``. When ``y`` is specified, pie plot of selected column +will be drawn. If ``subplots=True`` is specified, pie plots for each column are drawn as subplots. +A legend will be drawn in each pie plots by default; specify ``legend=False`` to hide it. .. ipython:: python :suppress: + np.random.seed(123456) plt.figure() .. ipython:: python - ser = Series(randn(1000)) + df = DataFrame(3 * rand(4, 2), index=['a', 'b', 'c', 'd'], columns=['x', 'y']) - @savefig kde_plot.png - ser.plot(kind='kde') + @savefig df_pie_plot.png + df.plot(kind='pie', subplots=True) -.. _visualization.hexbin: +You can use the ``labels`` and ``colors`` keywords to specify the labels and colors of each wedge. -Hexagonal Bin plot -~~~~~~~~~~~~~~~~~~ +.. warning:: -.. versionadded:: 0.14 + Most pandas plots use the the ``label`` and ``color`` arguments (not the lack of "s" on those). + To be consistent with :func:`matplotlib.pyplot.pie` you must use ``labels`` and ``colors``. + +If you want to hide wedge labels, specify ``labels=None``. +If ``fontsize`` is specified, the value will be applied to wedge labels. +Also, other keywords supported by :func:`matplotlib.pyplot.pie` can be used. -You can create hexagonal bin plots with ``DataFrame.plot`` and -``kind='hexbin'``. -Hexbin plots can be a useful alternative to scatter plots if your data are -too dense to plot each point individually. .. ipython:: python :suppress: @@ -551,24 +460,11 @@ too dense to plot each point individually. .. 
ipython:: python - df = DataFrame(randn(1000, 2), columns=['a', 'b']) - df['b'] = df['b'] = df['b'] + np.arange(1000) - - @savefig hexbin_plot.png - df.plot(kind='hexbin', x='a', y='b', gridsize=25) - + @savefig series_pie_plot_options.png + series.plot(kind='pie', labels=['AA', 'BB', 'CC', 'DD'], colors=['r', 'g', 'b', 'c'], + autopct='%.2f', fontsize=20) -A useful keyword argument is ``gridsize``; it controls the number of hexagons -in the x-direction, and defaults to 100. A larger ``gridsize`` means more, smaller -bins. - -By default, a histogram of the counts around each ``(x, y)`` point is computed. -You can specify alternative aggregations by passing values to the ``C`` and -``reduce_C_function`` arguments. ``C`` specifies the value at each ``(x, y)`` point -and ``reduce_C_function`` is a function of one argument that reduces all the -values in a bin to a single number (e.g. ``mean``, ``max``, ``sum``, ``std``). In this -example the positions are given by columns ``a`` and ``b``, while the value is -given by column ``z``. The bins are aggregated with numpy's ``max`` function. +If you pass values whose sum total is less than 1.0, matplotlib draws a semicircle. .. ipython:: python :suppress: @@ -577,90 +473,63 @@ given by column ``z``. The bins are aggregated with numpy's ``max`` function. .. ipython:: python - df = DataFrame(randn(1000, 2), columns=['a', 'b']) - df['b'] = df['b'] = df['b'] + np.arange(1000) - df['z'] = np.random.uniform(0, 3, 1000) - - @savefig hexbin_plot_agg.png - df.plot(kind='hexbin', x='a', y='b', C='z', reduce_C_function=np.max, - gridsize=25) - - -See the `matplotlib hexbin documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hexbin>`__ for more. + series = Series([0.1] * 4, index=['a', 'b', 'c', 'd'], name='series2') -.. 
_visualization.pie: + @savefig series_pie_plot_semi.png + series.plot(kind='pie') -Pie plot -~~~~~~~~~~~~~~~~~~ +See the `matplotlib pie documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie>`__ for more. -.. versionadded:: 0.14 +.. _visualization.tools: -You can create pie plot with ``DataFrame.plot`` or ``Series.plot`` with ``kind='pie'``. -If data includes ``NaN``, it will be automatically filled by 0. -If data contains negative value, ``ValueError`` will be raised. +Plotting Tools +-------------- -.. ipython:: python - :suppress: +These functions can be imported from ``pandas.tools.plotting`` +and take a :class:`Series` or :class:`DataFrame` as an argument. - plt.figure() +.. _visualization.scatter_matrix: -.. ipython:: python - - series = Series(3 * rand(4), index=['a', 'b', 'c', 'd'], name='series') +Scatter Matrix Plot +~~~~~~~~~~~~~~~~~~~ - @savefig series_pie_plot.png - series.plot(kind='pie') +.. versionadded:: 0.7.3 -Note that pie plot with ``DataFrame`` requires either to specify target column by ``y`` -argument or ``subplots=True``. When ``y`` is specified, pie plot of selected column -will be drawn. If ``subplots=True`` is specified, pie plots for each columns are drawn as subplots. -Legend will be drawn in each pie plots by default, specify ``legend=False`` to hide it. +You can create a scatter plot matrix using the + ``scatter_matrix`` method in ``pandas.tools.plotting``: .. ipython:: python :suppress: - plt.figure() + np.random.seed(123456) .. ipython:: python - - df = DataFrame(3 * rand(4, 2), index=['a', 'b', 'c', 'd'], columns=['x', 'y']) - - @savefig df_pie_plot.png - df.plot(kind='pie', subplots=True) -You can use ``labels`` and ``colors`` keywords to specify labels and colors of each wedges -(Cannot use ``label`` and ``color``, because of matplotlib's specification). -If you want to hide wedge labels, specify ``labels=None``. -If ``fontsize`` is specified, the value will be applied to wedge labels. 
-Also, other keywords supported by :func:`matplotlib.pyplot.pie` can be used. + from pandas.tools.plotting import scatter_matrix + df = DataFrame(randn(1000, 4), columns=['a', 'b', 'c', 'd']) + @savefig scatter_matrix_kde.png + scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde') -.. ipython:: python - :suppress: +.. _visualization.kde: - plt.figure() +.. versionadded:: 0.8.0 -.. ipython:: python - - @savefig series_pie_plot_options.png - series.plot(kind='pie', labels=['AA', 'BB', 'CC', 'DD'], colors=['r', 'g', 'b', 'c'], - autopct='%.2f', fontsize=20) - -If you pass values which sum total is less than 1.0, matplotlib draws semicircle. +You can create density plots using the Series/DataFrame.plot and +setting ``kind='kde'``: .. ipython:: python :suppress: plt.figure() + np.random.seed(123456) .. ipython:: python - - series = Series([0.1] * 4, index=['a', 'b', 'c', 'd'], name='series2') - @savefig series_pie_plot_semi.png - series.plot(kind='pie') + ser = Series(randn(1000)) -See the `matplotlib pie documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie>`__ for more. + @savefig kde_plot.png + ser.plot(kind='kde') .. _visualization.andrews_curves: @@ -710,6 +579,8 @@ represents one data point. Points that tend to cluster will appear closer togeth @savefig parallel_coordinates.png parallel_coordinates(data, 'Name') +.. _visualization.lag: + Lag Plot ~~~~~~~~ @@ -717,6 +588,11 @@ Lag plots are used to check if a data set or time series is random. Random data should not exhibit any structure in the lag plot. Non-random structure implies that the underlying data are not random. +.. ipython:: python + :suppress: + + np.random.seed(123456) + .. ipython:: python from pandas.tools.plotting import lag_plot @@ -729,6 +605,8 @@ implies that the underlying data are not random. @savefig lag_plot.png lag_plot(data) +.. 
_visualization.autocorrelation: + Autocorrelation Plot ~~~~~~~~~~~~~~~~~~~~ @@ -740,6 +618,11 @@ autocorrelations will be significantly non-zero. The horizontal lines displayed in the plot correspond to 95% and 99% confidence bands. The dashed line is 99% confidence band. +.. ipython:: python + :suppress: + + np.random.seed(123456) + .. ipython:: python from pandas.tools.plotting import autocorrelation_plot @@ -763,6 +646,11 @@ from a data set, the statistic in question is computed for this subset and the process is repeated a specified number of times. Resulting plots and histograms are what constitutes the bootstrap plot. +.. ipython:: python + :suppress: + + np.random.seed(123456) + .. ipython:: python from pandas.tools.plotting import bootstrap_plot @@ -807,6 +695,277 @@ be colored differently. @savefig radviz.png radviz(data, 'Name') +.. _visualization.formatting: + +Plot Formatting +--------------- + +Most plotting methods have a set of keyword arguments that control the +layout and formatting of the returned plot: + +.. ipython:: python + + @savefig series_plot_basic2.png + plt.figure(); ts.plot(style='k--', label='Series'); + +For each kind of plot (e.g. `line`, `bar`, `scatter`) any additional arguments +keywords are passed alogn to the corresponding matplotlib function +(:meth:`ax.plot() <matplotlib.axes.Axes.plot>`, +:meth: `ax.bar() <matplotlib.axes.Axes.bar>`, +:meth: `ax.scatter() <matplotlib.axes.Axes.scatter>`). These can be used +to control additional styling, beyond what pandas provides. + +Controlling the Legend +~~~~~~~~~~~~~~~~~~~~~~ + +You may set the ``legend`` argument to ``False`` to hide the legend, which is +shown by default. + +.. ipython:: python + :suppress: + + np.random.seed(123456) + +.. ipython:: python + + df = DataFrame(randn(1000, 4), index=ts.index, columns=list('ABCD')) + df = df.cumsum() + + @savefig frame_plot_basic_noleg.png + df.plot(legend=False) + +Scales +~~~~~~ + +You may pass ``logy`` to get a log-scale Y axis. + +.. 
ipython:: python + :suppress: + + plt.figure() + np.random.seed(123456) + + +.. ipython:: python + + ts = Series(randn(1000), index=date_range('1/1/2000', periods=1000)) + ts = np.exp(ts.cumsum()) + + @savefig series_plot_logy.png + ts.plot(logy=True) + +See also the ``logx`` and ``loglog`` keyword arguments. + +Plotting on a Secondary Y-axis +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To plot data on a secondary y-axis, use the ``secondary_y`` keyword: + +.. ipython:: python + :suppress: + + plt.figure() + +.. ipython:: python + + df.A.plot() + + @savefig series_plot_secondary_y.png + df.B.plot(secondary_y=True, style='g') + +To plot some columns in a DataFrame, give the column names to the ``secondary_y`` +keyword: + +.. ipython:: python + + plt.figure() + ax = df.plot(secondary_y=['A', 'B']) + ax.set_ylabel('CD scale') + @savefig frame_plot_secondary_y.png + ax.right_ax.set_ylabel('AB scale') + + +Note that the columns plotted on the secondary y-axis is automatically marked +with "(right)" in the legend. To turn off the automatic marking, use the +``mark_right=False`` keyword: + +.. ipython:: python + + plt.figure() + + @savefig frame_plot_secondary_y_no_right.png + df.plot(secondary_y=['A', 'B'], mark_right=False) + + +Suppressing Tick Resolution Adjustment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Pandas includes automatically tick resolution adjustment for regular frequency +time-series data. For limited cases where pandas cannot infer the frequency +information (e.g., in an externally created ``twinx``), you can choose to +suppress this behavior for alignment purposes. + +Here is the default behavior, notice how the x-axis tick labelling is performed: + +.. ipython:: python + + plt.figure() + + @savefig ser_plot_suppress.png + df.A.plot() + + +Using the ``x_compat`` parameter, you can suppress this behavior: + +.. 
ipython:: python + + plt.figure() + + @savefig ser_plot_suppress_parm.png + df.A.plot(x_compat=True) + + +If you have more than one plot that needs to be suppressed, the ``use`` method +in ``pandas.plot_params`` can be used in a `with statement`: + +.. ipython:: python + + import pandas as pd + + plt.figure() + + @savefig ser_plot_suppress_context.png + with pd.plot_params.use('x_compat', True): + df.A.plot(color='r') + df.B.plot(color='g') + df.C.plot(color='b') + +Subplots +~~~~~~~~ + +Each Series in a DataFrame can be plotted on a different axis +with the ``subplots`` keyword: + +.. ipython:: python + + @savefig frame_plot_subplots.png + df.plot(subplots=True, figsize=(6, 6)); + +Targeting Different Subplots +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can pass an ``ax`` argument to :meth:`Series.plot` to plot on a particular axis: + +.. ipython:: python + :suppress: + + np.random.seed(123456) + ts = Series(randn(1000), index=date_range('1/1/2000', periods=1000)) + ts = ts.cumsum() + + df = DataFrame(randn(1000, 4), index=ts.index, columns=list('ABCD')) + df = df.cumsum() + +.. ipython:: python + + fig, axes = plt.subplots(nrows=2, ncols=2) + df['A'].plot(ax=axes[0,0]); axes[0,0].set_title('A') + df['B'].plot(ax=axes[0,1]); axes[0,1].set_title('B') + df['C'].plot(ax=axes[1,0]); axes[1,0].set_title('C') + + @savefig series_plot_multi.png + df['D'].plot(ax=axes[1,1]); axes[1,1].set_title('D') + +.. ipython:: python + :suppress: + + plt.close('all') + +.. _visualization.errorbars: + +Plotting With Error Bars +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. versionadded:: 0.14 + +Plotting with error bars is now supported in the :meth:`DataFrame.plot` and :meth:`Series.plot` + +Horizontal and vertical errorbars can be supplied to the ``xerr`` and ``yerr`` keyword arguments to :meth:`~DataFrame.plot()`. The error values can be specified using a variety of formats. 
+ +- As a :class:`DataFrame` or ``dict`` of errors with column names matching the ``columns`` attribute of the plotting :class:`DataFrame` or matching the ``name`` attribute of the :class:`Series` +- As a ``str`` indicating which of the columns of plotting :class:`DataFrame` contain the error values +- As raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting :class:`DataFrame`/:class:`Series` + +Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``M`` length :class:`Series`, a ``Mx2`` array should be provided indicating lower and upper (or left and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors should be in a ``Mx2xN`` array. + +Here is an example of one way to easily plot group means with standard deviations from the raw data. + +.. ipython:: python + + # Generate the data + ix3 = pd.MultiIndex.from_arrays([['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'], ['foo', 'foo', 'bar', 'bar', 'foo', 'foo', 'bar', 'bar']], names=['letter', 'word']) + df3 = pd.DataFrame({'data1': [3, 2, 4, 3, 2, 4, 3, 2], 'data2': [6, 5, 7, 5, 4, 5, 6, 5]}, index=ix3) + + # Group by index labels and take the means and standard deviations for each group + gp3 = df3.groupby(level=('letter', 'word')) + means = gp3.mean() + errors = gp3.std() + means + errors + + # Plot + fig, ax = plt.subplots() + @savefig errorbar_example.png + means.plot(yerr=errors, ax=ax, kind='bar') + +.. _visualization.table: + +Plotting Tables +~~~~~~~~~~~~~~~ + +.. versionadded:: 0.14 + +Plotting with matplotlib table is now supported in :meth:`DataFrame.plot` and :meth:`Series.plot` with a ``table`` keyword. The ``table`` keyword can accept ``bool``, :class:`DataFrame` or :class:`Series`. The simple way to draw a table is to specify ``table=True``. Data will be transposed to meet matplotlib's default layout. + +.. ipython:: python + :suppress: + + np.random.seed(123456) + +.. 
ipython:: python + + fig, ax = plt.subplots(1, 1) + df = DataFrame(rand(5, 3), columns=['a', 'b', 'c']) + ax.get_xaxis().set_visible(False) # Hide Ticks + + @savefig line_plot_table_true.png + df.plot(table=True, ax=ax) + +Also, you can pass different :class:`DataFrame` or :class:`Series` for ``table`` keyword. The data will be drawn as displayed in print method (not transposed automatically). If required, it should be transposed manually as below example. + +.. ipython:: python + + fig, ax = plt.subplots(1, 1) + ax.get_xaxis().set_visible(False) # Hide Ticks + @savefig line_plot_table_data.png + df.plot(table=np.round(df.T, 2), ax=ax) + + +Finally, there is a helper function ``pandas.tools.plotting.table`` to create a table from :class:`DataFrame` and :class:`Series`, and add it to an ``matplotlib.Axes``. This function can accept keywords which matplotlib table has. + +.. ipython:: python + + from pandas.tools.plotting import table + fig, ax = plt.subplots(1, 1) + + table(ax, np.round(df.describe(), 2), + loc='upper right', colWidths=[0.2, 0.2, 0.2]) + + @savefig line_plot_table_describe.png + df.plot(ax=ax, ylim=(0, 2), legend=None) + +**Note**: You can get table instances on the axes using ``axes.tables`` property for further decorations. See the `matplotlib table documenation <http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ for more. + .. _visualization.colormaps: Colormaps @@ -825,7 +984,12 @@ colors are selected based on an even spacing determined by the number of columns in the DataFrame. There is no consideration made for background color, so some colormaps will produce lines that are not easily visible. -To use the jet colormap, we can simply pass ``'jet'`` to ``colormap=`` +To use the cubhelix colormap, we can simply pass ``'cubehelix'`` to ``colormap=`` + +.. ipython:: python + :suppress: + + np.random.seed(123456) .. 
ipython:: python @@ -834,8 +998,8 @@ To use the jet colormap, we can simply pass ``'jet'`` to ``colormap=`` plt.figure() - @savefig jet.png - df.plot(colormap='jet') + @savefig cubehelix.png + df.plot(colormap='cubehelix') or we can pass the colormap itself @@ -845,11 +1009,16 @@ or we can pass the colormap itself plt.figure() - @savefig jet_cm.png - df.plot(colormap=cm.jet) + @savefig cubehelix_cm.png + df.plot(colormap=cm.cubehelix) Colormaps can also be used other plot types, like bar charts: +.. ipython:: python + :suppress: + + np.random.seed(123456) + .. ipython:: python dd = DataFrame(randn(10, 10)).applymap(abs) @@ -878,6 +1047,7 @@ Andrews curves charts: @savefig andrews_curve_winter.png andrews_curves(data, 'Name', colormap='winter') + Plotting directly with matplotlib --------------------------------- @@ -887,7 +1057,7 @@ customization is not (yet) supported by pandas. Series and DataFrame objects behave like arrays and can therefore be passed directly to matplotlib functions without explicit casts. -Pandas also automatically registers formatters and locators that recognize date +pandas also automatically registers formatters and locators that recognize date indices, thereby extending date and time support to practically all plot types available in matplotlib. Although this formatting does not provide the same level of refinement you would get when plotting via pandas, it can be faster @@ -897,6 +1067,10 @@ when plotting a large number of points. The speed up for large data sets only applies to pandas 0.14.0 and later. +.. ipython:: python + :suppress: + + np.random.seed(123456) .. ipython:: python
Closes https://github.com/pydata/pandas/issues/6994 I still need to do one more read through. Happy to hear additional suggestions.
https://api.github.com/repos/pandas-dev/pandas/pulls/7027
2014-05-02T18:07:46Z
2014-05-05T19:52:08Z
2014-05-05T19:52:08Z
2017-04-05T02:07:52Z
BUG: error in Float64Index with contains and non-float (GH7025)
diff --git a/doc/source/release.rst b/doc/source/release.rst index a0a96ebbd5c70..cfc23267f8ece 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -456,6 +456,7 @@ Bug Fixes - Bug in ``iloc`` when setting / aligning (:issue:`6766`) - Bug causing UnicodeEncodeError when get_dummies called with unicode values and a prefix (:issue:`6885`) - Bug in timeseries-with-frequency plot cursor display (:issue:`5453`) +- Bug surfaced in groupby.plot when using a ``Float64Index`` (:issue:`7025`) pandas 0.13.1 ------------- diff --git a/pandas/core/index.py b/pandas/core/index.py index ecef70d7aa43b..96ecb66e86c67 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2047,6 +2047,8 @@ def __contains__(self, other): return len(other) <= 1 and _try_get_item(other) in self except TypeError: return False + except: + return False def get_loc(self, key): if np.isnan(key): diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index d938cc28c2588..1b70ae0309b10 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -33,6 +33,11 @@ import pandas as pd from numpy.testing import assert_equal +def _skip_if_mpl_not_installed(): + try: + import matplotlib.pyplot as plt + except ImportError: + raise nose.SkipTest("matplotlib not installed") def commonSetUp(self): self.dateRange = bdate_range('1/1/2005', periods=250) @@ -3976,11 +3981,8 @@ def test_groupby_blacklist(self): getattr(gb, bl) def test_series_groupby_plotting_nominally_works(self): - try: - import matplotlib as mpl - mpl.use('Agg') - except ImportError: - raise nose.SkipTest("matplotlib not installed") + _skip_if_mpl_not_installed() + n = 10 weight = Series(np.random.normal(166, 20, size=n)) height = Series(np.random.normal(60, 10, size=n)) @@ -3991,14 +3993,26 @@ def test_series_groupby_plotting_nominally_works(self): height.groupby(gender).hist() tm.close() + def test_plotting_with_float_index_works(self): + _skip_if_mpl_not_installed() + + # GH 7025 + df = 
DataFrame({'def': [1,1,1,2,2,2,3,3,3], + 'val': np.random.randn(9)}, + index=[1.0,2.0,3.0,1.0,2.0,3.0,1.0,2.0,3.0]) + + df.groupby('def')['val'].plot() + tm.close() + df.groupby('def')['val'].apply(lambda x: x.plot()) + tm.close() + @slow def test_frame_groupby_plot_boxplot(self): - try: - import matplotlib.pyplot as plt - import matplotlib as mpl - mpl.use('Agg') - except ImportError: - raise nose.SkipTest("matplotlib not installed") + _skip_if_mpl_not_installed() + + import matplotlib.pyplot as plt + import matplotlib as mpl + mpl.use('Agg') tm.close() n = 10 @@ -4029,12 +4043,10 @@ def test_frame_groupby_plot_boxplot(self): @slow def test_frame_groupby_hist(self): - try: - import matplotlib.pyplot as plt - import matplotlib as mpl - mpl.use('Agg') - except ImportError: - raise nose.SkipTest("matplotlib not installed") + _skip_if_mpl_not_installed() + import matplotlib.pyplot as plt + import matplotlib as mpl + mpl.use('Agg') tm.close() n = 10
closes #7025
https://api.github.com/repos/pandas-dev/pandas/pulls/7026
2014-05-02T12:44:11Z
2014-05-02T21:37:21Z
2014-05-02T21:37:21Z
2014-07-05T02:58:59Z
BLD: use sqlalchemy 0.7.1 in 2.6 build (GH6340)
diff --git a/ci/requirements-2.6.txt b/ci/requirements-2.6.txt index 4ed488795fb07..d101ab9d6876f 100644 --- a/ci/requirements-2.6.txt +++ b/ci/requirements-2.6.txt @@ -6,7 +6,9 @@ http://www.crummy.com/software/BeautifulSoup/bs4/download/4.2/beautifulsoup4-4.2 html5lib==1.0b2 bigquery==2.0.17 numexpr==1.4.2 -sqlalchemy==0.8.1 +sqlalchemy==0.7.1 +pymysql==0.6.0 +psycopg2==2.5 scipy==0.11.0 statsmodels==0.4.3 xlwt==0.7.5
closes #6340
https://api.github.com/repos/pandas-dev/pandas/pulls/7022
2014-05-01T20:04:30Z
2014-06-14T13:15:12Z
2014-06-14T13:15:12Z
2014-06-14T13:15:12Z
TST: tests for groupby not using grouper column, solved in GH7000, (GH5614)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 4532c1d6eee11..f970d67faa752 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -188,7 +188,7 @@ API Changes validation warnings in :func:`read_csv`/:func:`read_table` (:issue:`6607`) - Raise a ``TypeError`` when ``DataFrame`` is passed an iterator as the ``data`` argument (:issue:`5357`) -- groupby will now not return the grouped column for non-cython functions (:issue:`5610`), +- groupby will now not return the grouped column for non-cython functions (:issue:`5610`, :issue:`5614`), as its already the index Deprecations diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index ffdd77bab9efd..bd91dc2d234d0 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -124,7 +124,7 @@ API changes g.nth(0, dropna='any') # similar to old behaviour - groupby will now not return the grouped column for non-cython functions (:issue:`5610`), + groupby will now not return the grouped column for non-cython functions (:issue:`5610`, :issue:`5614`), as its already the index .. 
ipython:: python diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index fcc4eb83b0af9..9b7c07325dfbb 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2029,6 +2029,16 @@ def test_non_cython_api(self): result = g.idxmax() assert_frame_equal(result,expected) + # cumsum (GH5614) + df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=['A', 'B', 'C']) + expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C']) + result = df.groupby('A').cumsum() + assert_frame_equal(result,expected) + + expected = DataFrame([[1, 2, np.nan], [2, np.nan, 9], [3, 4, 9]], columns=['A', 'B', 'C']).astype('float64') + result = df.groupby('A', as_index=False).cumsum() + assert_frame_equal(result,expected) + def test_grouping_ndarray(self): grouped = self.df.groupby(self.df['A'].values) diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index 95940ab9abbfb..5e426a75e8d63 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -2869,4 +2869,3 @@ def test_str_for_named_is_name(self): if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) - \ No newline at end of file
closes #5614
https://api.github.com/repos/pandas-dev/pandas/pulls/7019
2014-05-01T14:32:42Z
2014-05-01T15:13:57Z
2014-05-01T15:13:57Z
2014-07-16T09:04:12Z
ENH: cythonize groupby.count
diff --git a/doc/source/release.rst b/doc/source/release.rst index a6aa842940bc0..2151407b8c2fd 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -313,6 +313,8 @@ Improvements to existing features in item handling (:issue:`6745`, :issue:`6988`). - Improve performance in certain reindexing operations by optimizing ``take_2d`` (:issue:`6749`) - Arrays of strings can be wrapped to a specified width (``str.wrap``) (:issue:`6999`) +- ``GroupBy.count()`` is now implemented in Cython and is much faster for large + numbers of groups (:issue:`7016`). .. _release.bug_fixes-0.14.0: diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index b5df39df3b617..f9f9e5d5e4ad3 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -568,6 +568,8 @@ Performance - Performance improvements in timedelta conversions for integer dtypes (:issue:`6754`) - Improved performance of compatible pickles (:issue:`6899`) - Improve performance in certain reindexing operations by optimizing ``take_2d`` (:issue:`6749`) +- ``GroupBy.count()`` is now implemented in Cython and is much faster for large + numbers of groups (:issue:`7016`). Experimental ~~~~~~~~~~~~ diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index ce64ed754180d..400f7e06df784 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -5,7 +5,7 @@ import collections from pandas.compat import( - zip, builtins, range, long, lrange, lzip, + zip, builtins, range, long, lzip, OrderedDict, callable ) from pandas import compat @@ -713,15 +713,6 @@ def size(self): """ return self.grouper.size() - def count(self, axis=0): - """ - Number of non-null items in each group. 
- axis : axis number, default 0 - the grouping axis - """ - self._set_selection_from_grouper() - return self._python_agg_general(lambda x: notnull(x).sum(axis=axis)).astype('int64') - sum = _groupby_function('sum', 'add', np.sum) prod = _groupby_function('prod', 'prod', np.prod) min = _groupby_function('min', 'min', np.min, numeric_only=False) @@ -731,6 +722,12 @@ def count(self, axis=0): last = _groupby_function('last', 'last', _last_compat, numeric_only=False, _convert=True) + _count = _groupby_function('_count', 'count', + lambda x, axis=0: notnull(x).sum(axis=axis), + numeric_only=False) + + def count(self, axis=0): + return self._count().astype('int64') def ohlc(self): """ @@ -1318,10 +1315,11 @@ def get_group_levels(self): 'f': lambda func, a, b, c, d: func(a, b, c, d, 1) }, 'last': 'group_last', + 'count': 'group_count', } _cython_transforms = { - 'std': np.sqrt + 'std': np.sqrt, } _cython_arity = { @@ -1390,14 +1388,16 @@ def aggregate(self, values, how, axis=0): values = com.ensure_float(values) is_numeric = True else: - if issubclass(values.dtype.type, np.datetime64): - raise Exception('Cython not able to handle this case') - - values = values.astype(object) - is_numeric = False + is_numeric = issubclass(values.dtype.type, (np.datetime64, + np.timedelta64)) + if is_numeric: + values = values.view('int64') + else: + values = values.astype(object) # will be filled in Cython function - result = np.empty(out_shape, dtype=values.dtype) + result = np.empty(out_shape, + dtype=np.dtype('f%d' % values.dtype.itemsize)) result.fill(np.nan) counts = np.zeros(self.ngroups, dtype=np.int64) @@ -1405,10 +1405,10 @@ def aggregate(self, values, how, axis=0): if self._filter_empty_groups: if result.ndim == 2: - if is_numeric: + try: result = lib.row_bool_subset( result, (counts > 0).view(np.uint8)) - else: + except ValueError: result = lib.row_bool_subset_object( result, (counts > 0).view(np.uint8)) else: @@ -1442,6 +1442,7 @@ def _aggregate(self, result, counts, values, 
how, is_numeric): chunk = chunk.squeeze() agg_func(result[:, :, i], counts, chunk, comp_ids) else: + #import ipdb; ipdb.set_trace() # XXX BREAKPOINT agg_func(result, counts, values, comp_ids) return trans_func(result) @@ -1651,6 +1652,7 @@ def names(self): 'f': lambda func, a, b, c, d: func(a, b, c, d, 1) }, 'last': 'group_last_bin', + 'count': 'group_count_bin', } _name_functions = { diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py index 6d45a82fb7175..53754a899adf8 100644 --- a/pandas/src/generate_code.py +++ b/pandas/src/generate_code.py @@ -3,7 +3,6 @@ # don't introduce a pandas/pandas.compat import # or we get a bootstrapping problem from StringIO import StringIO -import os header = """ cimport numpy as np @@ -34,7 +33,9 @@ ctypedef unsigned char UChar cimport util -from util cimport is_array, _checknull, _checknan +from util cimport is_array, _checknull, _checknan, get_nat + +cdef int64_t iNaT = get_nat() # import datetime C API PyDateTime_IMPORT @@ -1150,6 +1151,79 @@ def group_var_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, (ct * ct - ct)) """ +group_count_template = """@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, + ndarray[int64_t] counts, + ndarray[%(c_type)s, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, lab + Py_ssize_t N = values.shape[0], K = values.shape[1] + %(c_type)s val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + if len(values) != len(labels): + raise AssertionError("len(index) != len(labels)") + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[lab, j] += val == val and val != iNaT + + for i in range(len(counts)): + for j in range(K): + out[i, j] = nobs[i, j] + + +""" + +group_count_bin_template = """@cython.boundscheck(False) 
+@cython.wraparound(False) +def group_count_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, + ndarray[int64_t] counts, + ndarray[%(c_type)s, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, ngroups + Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 + %(c_type)s val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + ngroups = len(bins) + (bins[len(bins) - 1] != N) + + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[b, j] += val == val and val != iNaT + + for i in range(ngroups): + for j in range(K): + out[i, j] = nobs[i, j] + + +""" # add passing bin edges, instead of labels @@ -2145,7 +2219,8 @@ def put2d_%(name)s_%(dest_type)s(ndarray[%(c_type)s, ndim=2, cast=True] values, #------------------------------------------------------------------------- # Generators -def generate_put_template(template, use_ints = True, use_floats = True): +def generate_put_template(template, use_ints = True, use_floats = True, + use_objects=False): floats_list = [ ('float64', 'float64_t', 'float64_t', 'np.float64'), ('float32', 'float32_t', 'float32_t', 'np.float32'), @@ -2156,11 +2231,14 @@ def generate_put_template(template, use_ints = True, use_floats = True): ('int32', 'int32_t', 'float64_t', 'np.float64'), ('int64', 'int64_t', 'float64_t', 'np.float64'), ] + object_list = [('object', 'object', 'float64_t', 'np.float64')] function_list = [] if use_floats: function_list.extend(floats_list) if use_ints: function_list.extend(ints_list) + if use_objects: + function_list.extend(object_list) output = StringIO() for name, c_type, dest_type, dest_dtype in function_list: @@ -2251,6 +2329,8 @@ def generate_from_template(template, exclude=None): group_max_bin_template, group_ohlc_template] +groupby_count = [group_count_template, group_count_bin_template] + templates_1d = 
[map_indices_template, pad_template, backfill_template, @@ -2272,6 +2352,7 @@ def generate_from_template(template, exclude=None): take_2d_axis1_template, take_2d_multi_template] + def generate_take_cython_file(path='generated.pyx'): with open(path, 'w') as f: print(header, file=f) @@ -2288,7 +2369,10 @@ def generate_take_cython_file(path='generated.pyx'): print(generate_put_template(template), file=f) for template in groupbys: - print(generate_put_template(template, use_ints = False), file=f) + print(generate_put_template(template, use_ints=False), file=f) + + for template in groupby_count: + print(generate_put_template(template, use_objects=True), file=f) # for template in templates_1d_datetime: # print >> f, generate_from_template_datetime(template) @@ -2299,5 +2383,6 @@ def generate_take_cython_file(path='generated.pyx'): for template in nobool_1d_templates: print(generate_from_template(template, exclude=['bool']), file=f) + if __name__ == '__main__': generate_take_cython_file() diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx index 68bda2957fb55..26c6f3daf0e0a 100644 --- a/pandas/src/generated.pyx +++ b/pandas/src/generated.pyx @@ -27,7 +27,9 @@ from khash cimport * ctypedef unsigned char UChar cimport util -from util cimport is_array, _checknull, _checknan +from util cimport is_array, _checknull, _checknan, get_nat + +cdef int64_t iNaT = get_nat() # import datetime C API PyDateTime_IMPORT @@ -6621,6 +6623,498 @@ def group_ohlc_float32(ndarray[float32_t, ndim=2] out, out[b, 2] = vlow out[b, 3] = vclose +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, lab + Py_ssize_t N = values.shape[0], K = values.shape[1] + float64_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + if 
len(values) != len(labels): + raise AssertionError("len(index) != len(labels)") + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[lab, j] += val == val and val != iNaT + + for i in range(len(counts)): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_float32(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float32_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, lab + Py_ssize_t N = values.shape[0], K = values.shape[1] + float32_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + if len(values) != len(labels): + raise AssertionError("len(index) != len(labels)") + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[lab, j] += val == val and val != iNaT + + for i in range(len(counts)): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_int8(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int8_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, lab + Py_ssize_t N = values.shape[0], K = values.shape[1] + int8_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + if len(values) != len(labels): + raise AssertionError("len(index) != len(labels)") + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[lab, j] += val == val and val != iNaT + + for i in range(len(counts)): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def 
group_count_int16(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int16_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, lab + Py_ssize_t N = values.shape[0], K = values.shape[1] + int16_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + if len(values) != len(labels): + raise AssertionError("len(index) != len(labels)") + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[lab, j] += val == val and val != iNaT + + for i in range(len(counts)): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_int32(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int32_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, lab + Py_ssize_t N = values.shape[0], K = values.shape[1] + int32_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + if len(values) != len(labels): + raise AssertionError("len(index) != len(labels)") + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[lab, j] += val == val and val != iNaT + + for i in range(len(counts)): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_int64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int64_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, lab + Py_ssize_t N = values.shape[0], K = values.shape[1] + int64_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + if len(values) != len(labels): + raise 
AssertionError("len(index) != len(labels)") + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[lab, j] += val == val and val != iNaT + + for i in range(len(counts)): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_object(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[object, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, lab + Py_ssize_t N = values.shape[0], K = values.shape[1] + object val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + if len(values) != len(labels): + raise AssertionError("len(index) != len(labels)") + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[lab, j] += val == val and val != iNaT + + for i in range(len(counts)): + for j in range(K): + out[i, j] = nobs[i, j] + + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_bin_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, ngroups + Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 + float64_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + ngroups = len(bins) + (bins[len(bins) - 1] != N) + + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[b, j] += val == val and val != iNaT + + for i in range(ngroups): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_bin_float32(ndarray[float32_t, ndim=2] out, + 
ndarray[int64_t] counts, + ndarray[float32_t, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, ngroups + Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 + float32_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + ngroups = len(bins) + (bins[len(bins) - 1] != N) + + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[b, j] += val == val and val != iNaT + + for i in range(ngroups): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_bin_int8(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int8_t, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, ngroups + Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 + int8_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + ngroups = len(bins) + (bins[len(bins) - 1] != N) + + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[b, j] += val == val and val != iNaT + + for i in range(ngroups): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_bin_int16(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int16_t, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, ngroups + Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 + int16_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + ngroups = len(bins) + (bins[len(bins) - 1] != N) + + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + 
counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[b, j] += val == val and val != iNaT + + for i in range(ngroups): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_bin_int32(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int32_t, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, ngroups + Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 + int32_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + ngroups = len(bins) + (bins[len(bins) - 1] != N) + + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[b, j] += val == val and val != iNaT + + for i in range(ngroups): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_bin_int64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int64_t, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, ngroups + Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 + int64_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + ngroups = len(bins) + (bins[len(bins) - 1] != N) + + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[b, j] += val == val and val != iNaT + + for i in range(ngroups): + for j in range(K): + out[i, j] = nobs[i, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_bin_object(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[object, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, 
ngroups + Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 + object val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + ngroups = len(bins) + (bins[len(bins) - 1] != N) + + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[b, j] += val == val and val != iNaT + + for i in range(ngroups): + for j in range(K): + out[i, j] = nobs[i, j] + + + @cython.wraparound(False) @cython.boundscheck(False) def left_join_indexer_unique_float64(ndarray[float64_t] left, diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 1b70ae0309b10..eb3c28b672fd4 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -1451,7 +1451,6 @@ def test_groupby_head_tail(self): assert_frame_equal(g_not_as[['B']].head(1), df_as.loc[[0,2], ['B']]) assert_frame_equal(g_not_as[['A', 'B']].head(1), df_as.loc[[0,2]]) - def test_groupby_multiple_key(self): df = tm.makeTimeDataFrame() grouped = df.groupby([lambda x: x.year, @@ -1629,6 +1628,21 @@ def test_cython_agg_nothing_to_agg(self): 'b': ['foo', 'bar'] * 25}) self.assertRaises(DataError, frame[['b']].groupby(frame['a']).mean) + def test_cython_agg_nothing_to_agg_with_dates(self): + frame = DataFrame({'a': np.random.randint(0, 5, 50), + 'b': ['foo', 'bar'] * 25, + 'dates': pd.date_range('now', periods=50, + freq='T')}) + with tm.assertRaisesRegexp(DataError, "No numeric types to aggregate"): + frame.groupby('b').dates.mean() + + def test_groupby_timedelta_cython_count(self): + df = DataFrame({'g': list('ab' * 2), + 'delt': np.arange(4).astype('timedelta64[ns]')}) + expected = Series([2, 2], index=['a', 'b'], name='delt') + result = df.groupby('g').delt.count() + tm.assert_series_equal(expected, result) + def test_cython_agg_frame_columns(self): # #2113 df = DataFrame({'x': [1, 2, 3], 'y': [3, 4, 5]}) @@ -1992,7 +2006,8 @@ def test_count(self): # GH5610 # 
count counts non-nulls - df = pd.DataFrame([[1, 2, 'foo'], [1, nan, 'bar'], [3, nan, nan]], columns=['A', 'B', 'C']) + df = pd.DataFrame([[1, 2, 'foo'], [1, nan, 'bar'], [3, nan, nan]], + columns=['A', 'B', 'C']) count_as = df.groupby('A').count() count_not_as = df.groupby('A', as_index=False).count() @@ -2005,6 +2020,19 @@ def test_count(self): count_B = df.groupby('A')['B'].count() assert_series_equal(count_B, expected['B']) + def test_count_object(self): + df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, + 'c': [2] * 3 + [3] * 3}) + result = df.groupby('c').a.count() + expected = pd.Series([3, 3], index=[2, 3], name='a') + tm.assert_series_equal(result, expected) + + df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3, + 'c': [2] * 3 + [3] * 3}) + result = df.groupby('c').a.count() + expected = pd.Series([1, 3], index=[2, 3], name='a') + tm.assert_series_equal(result, expected) + def test_non_cython_api(self): # GH5610 @@ -2354,7 +2382,6 @@ def test_groupby_aggregation_mixed_dtype(self): result = g[['v1','v2']].mean() assert_frame_equal(result,expected) - def test_groupby_dtype_inference_empty(self): # GH 6733 df = DataFrame({'x': [], 'range': np.arange(0,dtype='int64')}) @@ -3325,7 +3352,6 @@ def test_cumcount_groupby_not_col(self): assert_series_equal(expected, g.cumcount()) assert_series_equal(expected, sg.cumcount()) - def test_filter_series(self): import pandas as pd s = pd.Series([1, 3, 20, 5, 22, 24, 7]) @@ -4168,6 +4194,14 @@ def test_nargsort(self): expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) assert_equal(result, expected) + def test_datetime_count(self): + df = DataFrame({'a': [1,2,3] * 2, + 'dates': pd.date_range('now', periods=6, freq='T')}) + result = df.groupby('a').dates.count() + expected = Series([2, 2, 2], index=Index([1, 2, 3], name='a'), + name='dates') + tm.assert_series_equal(result, expected) + def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all() diff --git a/pandas/tslib.pyx 
b/pandas/tslib.pyx index 6d99d38049e5a..df9c465c33853 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -462,6 +462,12 @@ class NaTType(_NaT): def __hash__(self): return iNaT + def __int__(self): + return NPY_NAT + + def __long__(self): + return NPY_NAT + def weekday(self): return -1 diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index 01644153b28e1..638862ffd1367 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -119,6 +119,36 @@ def f(): setup, start_date=datetime(2011, 10, 1)) #---------------------------------------------------------------------- +# count() speed + +setup = common_setup + """ +n = 10000 +offsets = np.random.randint(n, size=n).astype('timedelta64[ns]') + +dates = np.datetime64('now') + offsets +dates[np.random.rand(n) > 0.5] = np.datetime64('nat') + +offsets[np.random.rand(n) > 0.5] = np.timedelta64('nat') + +value2 = np.random.randn(n) +value2[np.random.rand(n) > 0.5] = np.nan + +obj = pd.util.testing.choice(['a', 'b'], size=n).astype(object) +obj[np.random.randn(n) > 0.5] = np.nan + +df = DataFrame({'key1': np.random.randint(0, 500, size=n), + 'key2': np.random.randint(0, 100, size=n), + 'dates': dates, + 'value2' : value2, + 'value3' : np.random.randn(n), + 'obj': obj, + 'offsets': offsets}) +""" + +groupby_multi_count = Benchmark("df.groupby(['key1', 'key2']).count()", + setup, name='groupby_multi_count', + start_date=datetime(2014, 5, 5)) +#---------------------------------------------------------------------- # Series.value_counts setup = common_setup + """ @@ -151,11 +181,11 @@ def f(): ind2 = np.random.randint(0, 2, size=100000) df = DataFrame({'key1': fac1.take(ind1), - 'key2': fac2.take(ind2), - 'key3': fac2.take(ind2), - 'value1' : np.random.randn(100000), - 'value2' : np.random.randn(100000), - 'value3' : np.random.randn(100000)}) +'key2': fac2.take(ind2), +'key3': fac2.take(ind2), +'value1' : np.random.randn(100000), +'value2' : np.random.randn(100000), +'value3' : np.random.randn(100000)}) """ stmt = 
"df.pivot_table(rows='key1', cols=['key2', 'key3'])" @@ -192,13 +222,13 @@ def f(): start_date=datetime(2012, 5, 1)) groupby_first_float32 = Benchmark('data2.groupby(labels).first()', setup, - start_date=datetime(2013, 1, 1)) + start_date=datetime(2013, 1, 1)) groupby_last = Benchmark('data.groupby(labels).last()', setup, start_date=datetime(2012, 5, 1)) groupby_last_float32 = Benchmark('data2.groupby(labels).last()', setup, - start_date=datetime(2013, 1, 1)) + start_date=datetime(2013, 1, 1)) #---------------------------------------------------------------------- @@ -256,9 +286,9 @@ def f(): labels = np.random.randint(0, 2000, size=N) labels2 = np.random.randint(0, 3, size=N) df = DataFrame({'key': labels, - 'key2': labels2, - 'value1': randn(N), - 'value2': ['foo', 'bar', 'baz', 'qux'] * (N / 4)}) +'key2': labels2, +'value1': randn(N), +'value2': ['foo', 'bar', 'baz', 'qux'] * (N / 4)}) def f(g): return 1 """
closes #7003 - [x] vbench - ~~axis parameter~~ (this only works in non-cython land) - [x] tests for `object` dtype if there are none - [x] datetime64/timedelta64 count vbench results: ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- groupby_multi_count | 7.3980 | 6814.2579 | 0.0011 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7016
2014-04-30T22:12:47Z
2014-05-05T22:18:47Z
2014-05-05T22:18:47Z
2014-06-13T01:26:49Z
ERR: more informative error message on bool arith op failures
diff --git a/doc/source/release.rst b/doc/source/release.rst index b8e5b31bd873c..86407ed19a772 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -206,6 +206,8 @@ API Changes - Added ``factorize`` functions to ``Index`` and ``Series`` to get indexer and unique values (:issue:`7090`) - :meth:`DataFrame.describe` on a DataFrame with a mix of Timestamp and string like objects returns a different Index (:issue:`7088`). Previously the index was unintentionally sorted. +- arithmetic operations with **only** ``bool`` dtypes now raise an error + (:issue:`7011`, :issue:`6762`, :issue:`7015`) Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 4c099c627e6e5..b5b261d4728be 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -228,6 +228,18 @@ Display Changes length of the series (:issue:`7101`) - Fixed a bug in the HTML repr of a truncated Series or DataFrame not showing the class name with the `large_repr` set to 'info' (:issue:`7105`) +- arithmetic operations with **only** ``bool`` dtypes now raise an error + (:issue:`7011`, :issue:`6762`, :issue:`7015`) + + .. code-block:: python + + x = pd.Series(np.random.rand(10) > 0.5) + y = True + x * y + + # this now raises for arith ops like ``+``, ``*``, etc. + NotImplementedError: operator '*' not implemented for bool dtypes + .. 
_whatsnew_0140.groupby: diff --git a/pandas/computation/expressions.py b/pandas/computation/expressions.py index 128aa5bf2b511..bfb29e0d4fa10 100644 --- a/pandas/computation/expressions.py +++ b/pandas/computation/expressions.py @@ -158,7 +158,10 @@ def _has_bool_dtype(x): try: return x.dtype == bool except AttributeError: - return 'bool' in x.blocks + try: + return 'bool' in x.blocks + except AttributeError: + return isinstance(x, (bool, np.bool_)) def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('+', '*', '-', '/', diff --git a/pandas/core/ops.py b/pandas/core/ops.py index d4e756371001b..a52c1034e63cb 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -61,25 +61,25 @@ def names(x): default_axis=default_axis, fill_zeros=np.inf), # Causes a floating point exception in the tests when numexpr # enabled, so for now no speedup - mod=arith_method(operator.mod, names('mod'), default_axis=default_axis, - fill_zeros=np.nan), + mod=arith_method(operator.mod, names('mod'), None, + default_axis=default_axis, fill_zeros=np.nan), pow=arith_method(operator.pow, names('pow'), op('**'), default_axis=default_axis), # not entirely sure why this is necessary, but previously was included # so it's here to maintain compatibility - rmul=arith_method(operator.mul, names('rmul'), + rmul=arith_method(operator.mul, names('rmul'), op('*'), default_axis=default_axis), - rsub=arith_method(lambda x, y: y - x, names('rsub'), + rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'), default_axis=default_axis), rtruediv=arith_method(lambda x, y: operator.truediv(y, x), - names('rtruediv'), truediv=True, + names('rtruediv'), op('/'), truediv=True, fill_zeros=np.inf, default_axis=default_axis), rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x), - names('rfloordiv'), default_axis=default_axis, - fill_zeros=np.inf), - rpow=arith_method(lambda x, y: y ** x, names('rpow'), + names('rfloordiv'), op('//'), + default_axis=default_axis, fill_zeros=np.inf), + 
rpow=arith_method(lambda x, y: y ** x, names('rpow'), op('**'), default_axis=default_axis), - rmod=arith_method(lambda x, y: y % x, names('rmod'), + rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'), default_axis=default_axis), ) new_methods['div'] = new_methods['truediv'] @@ -100,11 +100,11 @@ def names(x): and_=bool_method(operator.and_, names('and_'), op('&')), or_=bool_method(operator.or_, names('or_'), op('|')), # For some reason ``^`` wasn't used in original. - xor=bool_method(operator.xor, names('xor')), + xor=bool_method(operator.xor, names('xor'), op('^')), rand_=bool_method(lambda x, y: operator.and_(y, x), - names('rand_')), - ror_=bool_method(lambda x, y: operator.or_(y, x), names('ror_')), - rxor=bool_method(lambda x, y: operator.xor(y, x), names('rxor')) + names('rand_'), op('&')), + ror_=bool_method(lambda x, y: operator.or_(y, x), names('ror_'), op('|')), + rxor=bool_method(lambda x, y: operator.xor(y, x), names('rxor'), op('^')) )) new_methods = dict((names(k), v) for k, v in new_methods.items()) @@ -431,7 +431,7 @@ def maybe_convert_for_time_op(cls, left, right, name): return cls(left, right, name) -def _arith_method_SERIES(op, name, str_rep=None, fill_zeros=None, +def _arith_method_SERIES(op, name, str_rep, fill_zeros=None, default_axis=None, **eval_kwargs): """ Wrapper function for Series arithmetic operations, to avoid @@ -506,7 +506,7 @@ def wrapper(left, right, name=name): return wrapper -def _comp_method_SERIES(op, name, str_rep=None, masker=False): +def _comp_method_SERIES(op, name, str_rep, masker=False): """ Wrapper function for Series arithmetic operations, to avoid code duplication. @@ -578,7 +578,7 @@ def wrapper(self, other): return wrapper -def _bool_method_SERIES(op, name, str_rep=None): +def _bool_method_SERIES(op, name, str_rep): """ Wrapper function for Series arithmetic operations, to avoid code duplication. 
@@ -647,7 +647,7 @@ def _radd_compat(left, right): return output -def _flex_method_SERIES(op, name, str_rep=None, default_axis=None, +def _flex_method_SERIES(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs): doc = """ Binary operator %s with support to substitute a fill_value for missing data diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index fdea275b7e040..09fc991dc1726 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -357,6 +357,18 @@ def test_bool_ops_raise_on_arithmetic(self): with tm.assertRaisesRegexp(NotImplementedError, err_msg): f(df.a, df.b) + with tm.assertRaisesRegexp(NotImplementedError, err_msg): + f(df.a, True) + + with tm.assertRaisesRegexp(NotImplementedError, err_msg): + f(False, df.a) + + with tm.assertRaisesRegexp(TypeError, err_msg): + f(False, df) + + with tm.assertRaisesRegexp(TypeError, err_msg): + f(df, True) + if __name__ == '__main__': import nose
closes #7011
https://api.github.com/repos/pandas-dev/pandas/pulls/7015
2014-04-30T20:52:28Z
2014-05-14T05:00:40Z
2014-05-14T05:00:40Z
2014-06-25T08:57:46Z
Added selection example to cookbook
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 46a5d8c870a9d..c2ad8492473b3 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -60,28 +60,27 @@ Selection The :ref:`indexing <indexing>` docs. -Indexing using both row labels and conditionals, see -`here +`Indexing using both row labels and conditionals <http://stackoverflow.com/questions/14725068/pandas-using-row-labels-in-boolean-indexing>`__ -Use loc for label-oriented slicing and iloc positional slicing, see -`here <https://github.com/pydata/pandas/issues/2904>`__ +`Use loc for label-oriented slicing and iloc positional slicing +<https://github.com/pydata/pandas/issues/2904>`__ -Extend a panel frame by transposing, adding a new dimension, and transposing back to the original dimensions, see -`here +`Extend a panel frame by transposing, adding a new dimension, and transposing back to the original dimensions <http://stackoverflow.com/questions/15364050/extending-a-pandas-panel-frame-along-the-minor-axis>`__ -Mask a panel by using ``np.where`` and then reconstructing the panel with the new masked values -`here +`Mask a panel by using ``np.where`` and then reconstructing the panel with the new masked values <http://stackoverflow.com/questions/14650341/boolean-mask-in-pandas-panel>`__ -Using ``~`` to take the complement of a boolean array, see -`here +`Using ``~`` to take the complement of a boolean array, see <http://stackoverflow.com/questions/14986510/picking-out-elements-based-on-complement-of-indices-in-python-pandas>`__ `Efficiently creating columns using applymap <http://stackoverflow.com/questions/16575868/efficiently-creating-additional-columns-in-a-pandas-dataframe-using-map>`__ +`Keep other columns when using min() with groupby +<http://stackoverflow.com/questions/23394476/keep-other-columns-when-using-min-with-groupby>`__ + .. _cookbook.multi_index: MultiIndexing
I added a (for me) very helpful Q&A from Stackoverflow on groupby/minimization while retaining the original DataFrame columns. Additionally, I cleaned up some links in the same cookbok section. (Disclaimer: this is my first pull request.)
https://api.github.com/repos/pandas-dev/pandas/pulls/7012
2014-04-30T18:15:27Z
2014-04-30T18:56:37Z
2014-04-30T18:56:37Z
2014-06-30T11:25:11Z
BLD/TST: Python3.4 testing via travis
diff --git a/.travis.yml b/.travis.yml index 5eef300d0644e..d4341b11c6eb9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -46,16 +46,32 @@ matrix: - FULL_DEPS=true - CLIPBOARD=xsel - JOB_NAME: "33_nslow" + - python: 3.4 + env: + - EXPERIMENTAL=true + - NOSE_ARGS="not slow and not disabled" + - FULL_DEPS=true + - CLIPBOARD=xsel + - JOB_NAME: "34_nslow" - python: 2.7 env: + - EXPERIMENTAL=true - NOSE_ARGS="not slow and not network and not disabled" - JOB_NAME: "27_numpy_master" - JOB_TAG=_NUMPY_DEV_master - NUMPY_BUILD=master - PANDAS_TESTING_MODE="numpy_deprecate" allow_failures: + - python: 3.4 + env: + - EXPERIMENTAL=true + - NOSE_ARGS="not slow and not disabled" + - FULL_DEPS=true + - CLIPBOARD=xsel + - JOB_NAME: "34_nslow" - python: 2.7 env: + - EXPERIMENTAL=true - NOSE_ARGS="not slow and not network and not disabled" - JOB_NAME: "27_numpy_master" - JOB_TAG=_NUMPY_DEV_master diff --git a/ci/install.sh b/ci/install.sh index e6829750b64e8..7a335d91f474a 100755 --- a/ci/install.sh +++ b/ci/install.sh @@ -31,11 +31,38 @@ edit_init python_major_version="${TRAVIS_PYTHON_VERSION:0:1}" [ "$python_major_version" == "2" ] && python_major_version="" -# fix these versions -pip install -I pip==1.5.1 -pip install -I setuptools==2.2 +home_dir=$(pwd) + +# known working +# pip==1.5.1 +# setuptools==2.2 +# wheel==0.22 +# nose==1.3.0 (1.3.1 broken for PY3) + +pip install -I -U pip +pip install -I -U setuptools pip install wheel==0.22 -pip install nose==1.3.0 + +# install nose +pip uninstall nose -y + +if [ -n "$EXPERIMENTAL" ]; then + + # install from master + rm -Rf /tmp/nose + cd /tmp + git clone --branch master https://github.com/nose-devs/nose.git nose + cd nose + python setup.py install + cd $home_dir + +else + + # known good version + pip install nose==1.3.0 + +fi + # comment this line to disable the fetching of wheel files base_url=http://pandas.pydata.org/pandas-build/dev/wheels @@ -54,8 +81,8 @@ time sudo apt-get $APT_ARGS install libatlas-base-dev gfortran if [ -n 
"$NUMPY_BUILD" ]; then # building numpy - curdir=$(pwd) + cd $home_dir echo "cloning numpy" rm -Rf /tmp/numpy @@ -70,12 +97,10 @@ if [ -n "$NUMPY_BUILD" ]; then # clone & install git clone --branch $NUMPY_BUILD https://github.com/numpy/numpy.git numpy cd numpy - wd=${pwd} time pip install . pip uninstall cython -y cd $curdir - echo "building numpy: $wd" numpy_version=$(python -c 'import numpy; print(numpy.__version__)') echo "[$curdir] numpy current: $numpy_version" fi @@ -83,7 +108,7 @@ fi # Force virtualenv to accept system_site_packages rm -f $VIRTUAL_ENV/lib/python$TRAVIS_PYTHON_VERSION/no-global-site-packages.txt - +cd $home_dir time pip install $PIP_ARGS -r ci/requirements-${wheel_box}.txt # Need to enable for locale testing. The location of the locale file(s) is diff --git a/ci/requirements-3.4.txt b/ci/requirements-3.4.txt index 8be3c17eb9ade..ef4b710a35b58 100644 --- a/ci/requirements-3.4.txt +++ b/ci/requirements-3.4.txt @@ -6,15 +6,14 @@ xlrd==0.9.2 html5lib==1.0b2 numpy==1.8.0 cython==0.20.0 +scipy==0.13.3 numexpr==2.3 tables==3.1.0 bottleneck==0.8.0 matplotlib==1.3.1 patsy==0.2.1 lxml==3.2.1 -scipy==0.13.3 -beautifulsoup4==4.2.1 -statsmodels==0.5.0 sqlalchemy==0.9.3 pymysql==0.6.1 psycopg2==2.5.2 +beautifulsoup4==4.2.1 diff --git a/doc/source/install.rst b/doc/source/install.rst index f67bdc10a457f..a73922caeaf1d 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -16,10 +16,7 @@ compiler (MinGW or Visual Studio) installed. `How-to install MinGW on Windows Python version support ~~~~~~~~~~~~~~~~~~~~~~ -Officially Python 2.6 to 2.7 and Python 3.2+. Python 2.4 and Python 2.5 are no -longer supported since the userbase has shrunk significantly. Continuing Python -2.4 and 2.5 support will require either monetary development support or someone -contributing to the project to restore compatibility. +Officially Python 2.6, 2.7, 3.2, 3.3, and 3.4. 
Binary installers diff --git a/doc/source/release.rst b/doc/source/release.rst index 91bf6084e0faa..4532c1d6eee11 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -53,6 +53,7 @@ pandas 0.14.0 New features ~~~~~~~~~~~~ +- Officially support Python 3.4 - ``Index`` returns a MultiIndex if passed a list of tuples ``DataFrame(dict)`` and ``Series(dict)`` create ``MultiIndex`` columns and index where applicable (:issue:`4187`) diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 120faa3c3f2c8..ffdd77bab9efd 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -9,6 +9,7 @@ users upgrade to this version. - Highlights include: + - Officially support Python 3.4 - SQL interfaces updated to use ``sqlalchemy``, See :ref:`Here<whatsnew_0140.sql>`. - MultiIndexing Using Slicers, See :ref:`Here<whatsnew_0140.slicers>`. - Ability to join a singly-indexed DataFrame with a multi-indexed DataFrame, see :ref:`Here <merging.join_on_mi>`
closes #6619
https://api.github.com/repos/pandas-dev/pandas/pulls/7009
2014-04-30T13:00:43Z
2014-04-30T15:48:21Z
2014-04-30T15:48:21Z
2014-07-16T09:03:59Z
BLD: fix failing vbench cases
diff --git a/vb_suite/eval.py b/vb_suite/eval.py index 36aa702b5602a..a350cdc54cd17 100644 --- a/vb_suite/eval.py +++ b/vb_suite/eval.py @@ -78,7 +78,6 @@ name='eval_frame_and_one_thread', start_date=datetime(2013, 7, 26)) -setup = common_setup eval_frame_and_python = \ Benchmark("pd.eval('(df > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)', engine='python')", common_setup, name='eval_frame_and_python', @@ -102,7 +101,6 @@ name='eval_frame_chained_cmp_one_thread', start_date=datetime(2013, 7, 26)) -# setup = common_setup eval_frame_chained_cmp_python = \ Benchmark("pd.eval('df < df2 < df3 < df4', engine='python')", common_setup, name='eval_frame_chained_cmp_python', @@ -129,7 +127,7 @@ df = DataFrame({'dates': s.values}) """ -query_datetime_series = Benchmark("df.query('dates < ts')", +query_datetime_series = Benchmark("df.query('dates < @ts')", series_setup, start_date=datetime(2013, 9, 27)) @@ -137,7 +135,7 @@ df = DataFrame({'a': np.random.randn(N)}, index=index) """ -query_datetime_index = Benchmark("df.query('index < ts')", +query_datetime_index = Benchmark("df.query('index < @ts')", index_setup, start_date=datetime(2013, 9, 27)) setup = setup + """ @@ -147,6 +145,6 @@ max_val = df['a'].max() """ -query_with_boolean_selection = Benchmark("df.query('(a >= min_val) & (a <= max_val)')", - index_setup, start_date=datetime(2013, 9, 27)) +query_with_boolean_selection = Benchmark("df.query('(a >= @min_val) & (a <= @max_val)')", + setup, start_date=datetime(2013, 9, 27)) diff --git a/vb_suite/frame_ctor.py b/vb_suite/frame_ctor.py index 8180b39b116fe..713237779494e 100644 --- a/vb_suite/frame_ctor.py +++ b/vb_suite/frame_ctor.py @@ -45,15 +45,35 @@ frame_ctor_nested_dict_int64 = Benchmark("DataFrame(data)", setup) # dynamically generate benchmarks for every offset +# +# get_period_count & get_index_for_offset are there because blindly taking each +# offset times 1000 can easily go out of Timestamp bounds and raise errors. 
dynamic_benchmarks = {} n_steps = [1, 2] for offset in offsets.__all__: for n in n_steps: setup = common_setup + """ -df = DataFrame(np.random.randn(1000,10),index=date_range('1/1/1900',periods=1000,freq={}({}))) + +def get_period_count(start_date, off): + ten_offsets_in_days = ((start_date + off * 10) - start_date).days + if ten_offsets_in_days == 0: + return 1000 + else: + return min(9 * ((Timestamp.max - start_date).days // + ten_offsets_in_days), + 1000) + +def get_index_for_offset(off): + start_date = Timestamp('1/1/1900') + return date_range(start_date, + periods=min(1000, get_period_count(start_date, off)), + freq=off) + +idx = get_index_for_offset({}({})) +df = DataFrame(np.random.randn(len(idx),10), index=idx) d = dict([ (col,df[col]) for col in df.columns ]) """.format(offset, n) - key = 'frame_ctor_dtindex_{}({})'.format(offset, n) + key = 'frame_ctor_dtindex_{}x{}'.format(offset, n) dynamic_benchmarks[key] = Benchmark("DataFrame(d)", setup, name=key) # Have to stuff them in globals() so vbench detects them diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index dc8103b0ceea2..01644153b28e1 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -267,22 +267,22 @@ def f(g): start_date=datetime(2011, 10, 1)) groupby_frame_apply = Benchmark("df.groupby(['key', 'key2']).apply(f)", setup, - start_date=datetime(2011, 10, 1)) + start_date=datetime(2011, 10, 1)) #---------------------------------------------------------------------- # DataFrame nth setup = common_setup + """ -df = pd.DataFrame(np.random.randint(1, 100, (10000, 2))) +df = DataFrame(np.random.randint(1, 100, (10000, 2))) """ # Not really a fair test as behaviour has changed! 
groupby_frame_nth = Benchmark("df.groupby(0).nth(0)", setup, - start_date=datetime(2014, 3, 1)) + start_date=datetime(2014, 3, 1)) groupby_series_nth = Benchmark("df[1].groupby(df[0]).nth(0)", setup, - start_date=datetime(2014, 3, 1)) + start_date=datetime(2014, 3, 1)) #---------------------------------------------------------------------- diff --git a/vb_suite/index_object.py b/vb_suite/index_object.py index 5a98481a689a6..e6bd32737d567 100644 --- a/vb_suite/index_object.py +++ b/vb_suite/index_object.py @@ -11,7 +11,7 @@ # intersection, union setup = common_setup + """ -rng = DatetimeIndex('1/1/2000', periods=10000, offset=datetools.Minute()) +rng = DatetimeIndex(start='1/1/2000', periods=10000, freq=datetools.Minute()) if rng.dtype == object: rng = rng.view(Index) else: diff --git a/vb_suite/join_merge.py b/vb_suite/join_merge.py index 45f3f510d9f08..f91f45f661af0 100644 --- a/vb_suite/join_merge.py +++ b/vb_suite/join_merge.py @@ -223,15 +223,15 @@ def sample(values, k): # GH 6329 setup = common_setup + """ -date_index = pd.date_range('01-Jan-2013', '23-Jan-2013', freq='T') +date_index = date_range('01-Jan-2013', '23-Jan-2013', freq='T') daily_dates = date_index.to_period('D').to_timestamp('S','S') fracofday = date_index.view(np.ndarray) - daily_dates.view(np.ndarray) fracofday = fracofday.astype('timedelta64[ns]').astype(np.float64)/864e11 -fracofday = pd.TimeSeries(fracofday, daily_dates) -index = pd.date_range(date_index.min().to_period('A').to_timestamp('D','S'), +fracofday = TimeSeries(fracofday, daily_dates) +index = date_range(date_index.min().to_period('A').to_timestamp('D','S'), date_index.max().to_period('A').to_timestamp('D','E'), freq='D') -temp = pd.TimeSeries(1.0, index) +temp = TimeSeries(1.0, index) """ join_non_unique_equal = Benchmark('fracofday * temp[fracofday.index]', setup, diff --git a/vb_suite/packers.py b/vb_suite/packers.py index ca0193e9b2c10..40227b3c9bc48 100644 --- a/vb_suite/packers.py +++ b/vb_suite/packers.py @@ -106,7 +106,7 @@ 
def remove(f): setup_int_index = """ import numpy as np -df.index = np.arange(50000) +df.index = np.arange(N) """ setup = common_setup + """ diff --git a/vb_suite/panel_ctor.py b/vb_suite/panel_ctor.py index e304a48e5d73f..b6637bb1e61ec 100644 --- a/vb_suite/panel_ctor.py +++ b/vb_suite/panel_ctor.py @@ -11,7 +11,8 @@ setup_same_index = common_setup + """ # create 100 dataframes with the same index -dr = np.asarray(DatetimeIndex(datetime(1990,1,1), datetime(2012,1,1))) +dr = np.asarray(DatetimeIndex(start=datetime(1990,1,1), end=datetime(2012,1,1), + freq=datetools.Day(1))) data_frames = {} for x in xrange(100): df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), @@ -27,7 +28,8 @@ setup_equiv_indexes = common_setup + """ data_frames = {} for x in xrange(100): - dr = np.asarray(DatetimeIndex(datetime(1990,1,1), datetime(2012,1,1))) + dr = np.asarray(DatetimeIndex(start=datetime(1990,1,1), end=datetime(2012,1,1), + freq=datetools.Day(1))) df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), "c": [2]*len(dr)}, index=dr) data_frames[x] = df @@ -44,7 +46,7 @@ end = datetime(2012,1,1) for x in xrange(100): end += timedelta(days=1) - dr = np.asarray(DateRange(start, end)) + dr = np.asarray(date_range(start, end)) df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), "c": [2]*len(dr)}, index=dr) data_frames[x] = df @@ -62,7 +64,7 @@ for x in xrange(100): if x == 50: end += timedelta(days=1) - dr = np.asarray(DateRange(start, end)) + dr = np.asarray(date_range(start, end)) df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), "c": [2]*len(dr)}, index=dr) data_frames[x] = df diff --git a/vb_suite/panel_methods.py b/vb_suite/panel_methods.py index 45790feb4a58e..5e88671a23707 100644 --- a/vb_suite/panel_methods.py +++ b/vb_suite/panel_methods.py @@ -15,7 +15,7 @@ panel_shift = Benchmark('panel.shift(1)', setup, start_date=datetime(2012, 1, 12)) -panel_shift_minor = Benchmark('panel.shift(1, axis=minor)', setup, +panel_shift_minor = Benchmark('panel.shift(1, axis="minor")', setup, 
start_date=datetime(2012, 1, 12)) panel_pct_change_major = Benchmark('panel.pct_change(1, axis="major")', setup, diff --git a/vb_suite/reindex.py b/vb_suite/reindex.py index ca82ee9b82649..5d3d07783c9a8 100644 --- a/vb_suite/reindex.py +++ b/vb_suite/reindex.py @@ -18,7 +18,7 @@ #---------------------------------------------------------------------- setup = common_setup + """ -rng = DatetimeIndex('1/1/1970', periods=10000, offset=datetools.Minute()) +rng = DatetimeIndex(start='1/1/1970', periods=10000, freq=datetools.Minute()) df = DataFrame(np.random.rand(10000, 10), index=rng, columns=range(10)) df['foo'] = 'bar' @@ -51,7 +51,7 @@ # Pad / backfill setup = common_setup + """ -rng = DateRange('1/1/2000', periods=100000, offset=datetools.Minute()) +rng = date_range('1/1/2000', periods=100000, freq=datetools.Minute()) ts = Series(np.random.randn(len(rng)), index=rng) ts2 = ts[::2] diff --git a/vb_suite/stat_ops.py b/vb_suite/stat_ops.py index a3a1df70dc248..f4ea6706c193c 100644 --- a/vb_suite/stat_ops.py +++ b/vb_suite/stat_ops.py @@ -87,7 +87,7 @@ stats_rank_pct_average = Benchmark('s.rank(pct=True)', setup, start_date=datetime(2014, 01, 16)) -stats_rank_pct_average_old = Benchmark('s.rank() / s.size()', setup, +stats_rank_pct_average_old = Benchmark('s.rank() / len(s)', setup, start_date=datetime(2014, 01, 16)) setup = common_setup + """ values = np.random.randint(0, 100000, size=200000) diff --git a/vb_suite/strings.py b/vb_suite/strings.py index 459684ec0e435..96791cd52f1cf 100644 --- a/vb_suite/strings.py +++ b/vb_suite/strings.py @@ -46,13 +46,13 @@ def make_series(letters, strlen, size): strings_get = Benchmark("many.str.get(0)", setup) setup = setup + """ -make_series(string.uppercase, strlen=10, size=10000).str.join('|') +s = make_series(string.uppercase, strlen=10, size=10000).str.join('|') """ strings_get_dummies = Benchmark("s.str.get_dummies('|')", setup) setup = common_setup + """ import pandas.util.testing as testing -ser = 
pd.Series(testing.makeUnicodeIndex()) +ser = Series(testing.makeUnicodeIndex()) """ strings_encode_decode = Benchmark("ser.str.encode('utf-8').str.decode('utf-8')", setup)
There were about 30 vbench tests that failed due to multiple recent deprecations and copy-pasting. This PR fixes all but 8 of those that correspond to constructing frame with "exotic" time offsets (`WeekOfYear`, `LastWeekOfYear`, `FY5253` and `FY5253Quarter`) that apparently require some kwargs for construction. This also fixes the "different name/same test" situation with `eval_frame_and` benchmarks where for some reason single-thread and multi-thread cases share setup code.
https://api.github.com/repos/pandas-dev/pandas/pulls/7008
2014-04-30T09:34:09Z
2014-04-30T12:48:54Z
2014-04-30T12:48:54Z
2014-06-24T04:26:08Z
BUG: fixing tseries plot cursor display, resolves #5453
diff --git a/doc/source/release.rst b/doc/source/release.rst index 91bf6084e0faa..975d92cc215c4 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -450,8 +450,9 @@ Bug Fixes - Bug in enabling ``subplots=True`` in ``DataFrame.plot`` only has single column raises ``TypeError``, and ``Series.plot`` raises ``AttributeError`` (:issue:`6951`) - Bug in ``DataFrame.plot`` draws unnecessary axes when enabling ``subplots`` and ``kind=scatter`` (:issue:`6951`) - Bug in ``read_csv`` from a filesystem with non-utf-8 encoding (:issue:`6807`) -- Bug in ``iloc`` when setting / aligning (:issue:``6766`) +- Bug in ``iloc`` when setting / aligning (:issue:`6766`) - Bug causing UnicodeEncodeError when get_dummies called with unicode values and a prefix (:issue:`6885`) +- Bug in timeseries-with-frequency plot cursor display (:issue:`5453`) pandas 0.13.1 ------------- diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py index ae32367a57cd3..abec1d469114f 100644 --- a/pandas/tseries/plotting.py +++ b/pandas/tseries/plotting.py @@ -83,8 +83,7 @@ def tsplot(series, plotf, **kwargs): ax.set_xlim(left, right) # x and y coord info - tz = series.index.to_datetime().tz - ax.format_coord = lambda t, y : "t = {} y = {:8f}".format(datetime.fromtimestamp(t, tz), y) + ax.format_coord = lambda t, y: "t = {} y = {:8f}".format(Period(ordinal=int(t), freq=ax.freq), y) return lines diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index 118c09ddf826f..5d1e4b67041f7 100644 --- a/pandas/tseries/tests/test_plotting.py +++ b/pandas/tseries/tests/test_plotting.py @@ -131,6 +131,21 @@ def test_get_datevalue(self): self.assertEqual(get_datevalue('1/1/1987', 'D'), Period('1987-1-1', 'D').ordinal) + @slow + def test_ts_plot_format_coord(self): + def check_format_of_first_point(ax, expected_string): + first_line = ax.get_lines()[0] + first_x = first_line.get_xdata()[0].ordinal + first_y = first_line.get_ydata()[0] + 
self.assertEqual(expected_string, ax.format_coord(first_x, first_y)) + + annual = Series(1, index=date_range('2014-01-01', periods=3, freq='A-DEC')) + check_format_of_first_point(annual.plot(), 't = 2014 y = 1.000000') + + # note this is added to the annual plot already in existence, and changes its freq field + daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D')) + check_format_of_first_point(daily.plot(), 't = 2014-01-01 y = 1.000000') + @slow def test_line_plot_period_series(self): for s in self.period_ser:
Fixes #5453 I am not sure how to test a mouse-over, and the original commit 922c6102eebb7347cea587fffc4795a3ca3e73b2 introducing this functionality did not have tests either, which is sadly probably how this got broken. @willfurnass do you happen to have any ideas for how to test this functionality, maybe from when you first looked at this? The new code doesn't display timezone information like the old code once did (before it was broken) as Periods don't seem to have timezone info, and all `tseries/plotting.py` plots are Period-based. One could perhaps do some minor acrobatics to convert the tz-naive start time of the Period into a tz-aware Timestamp, I can research that if people feel strongly about it.
https://api.github.com/repos/pandas-dev/pandas/pulls/7007
2014-04-30T04:27:11Z
2014-05-01T17:50:35Z
2014-05-01T17:50:35Z
2014-06-12T21:07:21Z
BUG: duplicate indexing with setitem with iloc (GH6766)
diff --git a/doc/source/release.rst b/doc/source/release.rst index b63a40d512b97..8a92b318e3d8f 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -450,6 +450,7 @@ Bug Fixes - Bug in enabling ``subplots=True`` in ``DataFrame.plot`` only has single column raises ``TypeError``, and ``Series.plot`` raises ``AttributeError`` (:issue:`6951`) - Bug in ``DataFrame.plot`` draws unnecessary axes when enabling ``subplots`` and ``kind=scatter`` (:issue:`6951`) - Bug in ``read_csv`` from a filesystem with non-utf-8 encoding (:issue:`6807`) +- Bug in ``iloc`` when setting / aligning (:issue:``6766`) pandas 0.13.1 ------------- diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 63988a5976fc9..a1bcab159cefa 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -385,6 +385,7 @@ def setter(item, v): s._data = s._data.setitem(indexer=pi, value=v) s._maybe_update_cacher(clear=True) + # reset the sliced object if unique self.obj[item] = s def can_do_equal_len(): diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index d89a88138b8fb..d42babc7cddbe 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -630,6 +630,36 @@ def test_loc_setitem_dups(self): df.loc[indexer]*=2.0 assert_frame_equal(df.loc[indexer],2.0*df_orig.loc[indexer]) + def test_iloc_setitem_dups(self): + + # GH 6766 + # iloc with a mask aligning from another iloc + df1 = DataFrame([{'A':None, 'B':1},{'A':2, 'B':2}]) + df2 = DataFrame([{'A':3, 'B':3},{'A':4, 'B':4}]) + df = concat([df1, df2], axis=1) + + expected = df.fillna(3) + expected['A'] = expected['A'].astype('float64') + inds = np.isnan(df.iloc[:, 0]) + mask = inds[inds].index + df.iloc[mask,0] = df.iloc[mask,2] + assert_frame_equal(df, expected) + + # del a dup column across blocks + expected = DataFrame({ 0 : [1,2], 1 : [3,4] }) + expected.columns=['B','B'] + del df['A'] + assert_frame_equal(df, expected) + + # assign back to self + df.iloc[[0,1],[0,1]] = 
df.iloc[[0,1],[0,1]] + assert_frame_equal(df, expected) + + # reversed x 2 + df.iloc[[1,0],[0,1]] = df.iloc[[1,0],[0,1]].reset_index(drop=True) + df.iloc[[1,0],[0,1]] = df.iloc[[1,0],[0,1]].reset_index(drop=True) + assert_frame_equal(df, expected) + def test_chained_getitem_with_lists(self): # GH6394
closes #6766
https://api.github.com/repos/pandas-dev/pandas/pulls/7006
2014-04-30T00:43:53Z
2014-04-30T01:05:17Z
2014-04-30T01:05:17Z
2014-07-16T09:03:54Z
DEPR: lookup, append
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index a28e20a636ce2..5bb87b8bb2663 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -28,26 +28,6 @@ def time_frame_get_numeric_data(self): self.df._get_numeric_data() -class Lookup: - def setup(self): - self.df = DataFrame(np.random.randn(10000, 8), columns=list("abcdefgh")) - self.df["foo"] = "bar" - self.row_labels = list(self.df.index[::10])[:900] - self.col_labels = list(self.df.columns) * 100 - self.row_labels_all = np.array( - list(self.df.index) * len(self.df.columns), dtype="object" - ) - self.col_labels_all = np.array( - list(self.df.columns) * len(self.df.index), dtype="object" - ) - - def time_frame_fancy_lookup(self): - self.df.lookup(self.row_labels, self.col_labels) - - def time_frame_fancy_lookup_all(self): - self.df.lookup(self.row_labels_all, self.col_labels_all) - - class Reindex: def setup(self): N = 10**3 diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py index 2309347ac96d8..d9fb3c8a8ff89 100644 --- a/asv_bench/benchmarks/join_merge.py +++ b/asv_bench/benchmarks/join_merge.py @@ -22,26 +22,6 @@ from pandas import ordered_merge as merge_ordered -class Append: - def setup(self): - self.df1 = DataFrame(np.random.randn(10000, 4), columns=["A", "B", "C", "D"]) - self.df2 = self.df1.copy() - self.df2.index = np.arange(10000, 20000) - self.mdf1 = self.df1.copy() - self.mdf1["obj1"] = "bar" - self.mdf1["obj2"] = "bar" - self.mdf1["int1"] = 5 - self.mdf1 = self.mdf1._consolidate() - self.mdf2 = self.mdf1.copy() - self.mdf2.index = self.df2.index - - def time_append_homogenous(self): - self.df1.append(self.df2) - - def time_append_mixed(self): - self.mdf1.append(self.mdf2) - - class Concat: params = [0, 1] diff --git a/doc/redirects.csv b/doc/redirects.csv index f0fab09196f26..b177be0c5c321 100644 --- a/doc/redirects.csv +++ b/doc/redirects.csv @@ -315,7 +315,6 @@ 
generated/pandas.DataFrame.aggregate,../reference/api/pandas.DataFrame.aggregate generated/pandas.DataFrame.align,../reference/api/pandas.DataFrame.align generated/pandas.DataFrame.all,../reference/api/pandas.DataFrame.all generated/pandas.DataFrame.any,../reference/api/pandas.DataFrame.any -generated/pandas.DataFrame.append,../reference/api/pandas.DataFrame.append generated/pandas.DataFrame.apply,../reference/api/pandas.DataFrame.apply generated/pandas.DataFrame.applymap,../reference/api/pandas.DataFrame.applymap generated/pandas.DataFrame.as_blocks,../reference/api/pandas.DataFrame.as_blocks @@ -408,7 +407,6 @@ generated/pandas.DataFrame.last,../reference/api/pandas.DataFrame.last generated/pandas.DataFrame.last_valid_index,../reference/api/pandas.DataFrame.last_valid_index generated/pandas.DataFrame.le,../reference/api/pandas.DataFrame.le generated/pandas.DataFrame.loc,../reference/api/pandas.DataFrame.loc -generated/pandas.DataFrame.lookup,../reference/api/pandas.DataFrame.lookup generated/pandas.DataFrame.lt,../reference/api/pandas.DataFrame.lt generated/pandas.DataFrame.mask,../reference/api/pandas.DataFrame.mask generated/pandas.DataFrame.max,../reference/api/pandas.DataFrame.max @@ -917,7 +915,6 @@ generated/pandas.Series.aggregate,../reference/api/pandas.Series.aggregate generated/pandas.Series.align,../reference/api/pandas.Series.align generated/pandas.Series.all,../reference/api/pandas.Series.all generated/pandas.Series.any,../reference/api/pandas.Series.any -generated/pandas.Series.append,../reference/api/pandas.Series.append generated/pandas.Series.apply,../reference/api/pandas.Series.apply generated/pandas.Series.argmax,../reference/api/pandas.Series.argmax generated/pandas.Series.argmin,../reference/api/pandas.Series.argmin diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst index 6c0b6a4752875..ea19bb6d85aed 100644 --- a/doc/source/reference/frame.rst +++ b/doc/source/reference/frame.rst @@ -66,7 +66,6 @@ Indexing, iteration 
DataFrame.keys DataFrame.iterrows DataFrame.itertuples - DataFrame.lookup DataFrame.pop DataFrame.tail DataFrame.xs @@ -250,7 +249,6 @@ Combining / comparing / joining / merging .. autosummary:: :toctree: api/ - DataFrame.append DataFrame.assign DataFrame.compare DataFrame.join diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst index c8bbe922f5313..659385c611ff0 100644 --- a/doc/source/reference/series.rst +++ b/doc/source/reference/series.rst @@ -247,7 +247,6 @@ Combining / comparing / joining / merging .. autosummary:: :toctree: api/ - Series.append Series.compare Series.update diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index dbd6d2757e1be..6566a1d67d1c9 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -1559,7 +1559,7 @@ For instance: df.reindex(cols, axis=1).to_numpy()[np.arange(len(df)), idx] Formerly this could be achieved with the dedicated ``DataFrame.lookup`` method -which was deprecated in version 1.2.0. +which was deprecated in version 1.2.0 and removed in version 2.0.0. .. 
_indexing.class: diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index c6b5816d12061..0f2dfd3d4c8a1 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -265,6 +265,9 @@ Removal of prior version deprecations/changes - Enforced deprecation changing behavior when passing ``datetime64[ns]`` dtype data and timezone-aware dtype to :class:`Series`, interpreting the values as wall-times instead of UTC times, matching :class:`DatetimeIndex` behavior (:issue:`41662`) - Removed deprecated :meth:`DataFrame._AXIS_NUMBERS`, :meth:`DataFrame._AXIS_NAMES`, :meth:`Series._AXIS_NUMBERS`, :meth:`Series._AXIS_NAMES` (:issue:`33637`) - Removed deprecated :meth:`Index.to_native_types`, use ``obj.astype(str)`` instead (:issue:`36418`) +- Removed deprecated :meth:`Series.iteritems`, :meth:`DataFrame.iteritems`, use ``obj.items`` instead (:issue:`45321`) +- Removed deprecated :meth:`DataFrame.lookup` (:issue:`35224`) +- Removed deprecated :meth:`Series.append`, :meth:`DataFrame.append`, use :func:`concat` instead (:issue:`35407`) - Removed deprecated :meth:`Series.iteritems`, :meth:`DataFrame.iteritems` and :meth:`HDFStore.iteritems` use ``obj.items`` instead (:issue:`45321`) - Removed deprecated :meth:`DatetimeIndex.union_many` (:issue:`45018`) - Removed deprecated ``weekofyear`` and ``week`` attributes of :class:`DatetimeArray`, :class:`DatetimeIndex` and ``dt`` accessor in favor of ``isocalendar().week`` (:issue:`33595`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2965baf837419..ec5ea5b9b19d5 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4870,69 +4870,6 @@ def _series(self): for idx, item in enumerate(self.columns) } - def lookup( - self, row_labels: Sequence[IndexLabel], col_labels: Sequence[IndexLabel] - ) -> np.ndarray: - """ - Label-based "fancy indexing" function for DataFrame. - - .. 
deprecated:: 1.2.0 - DataFrame.lookup is deprecated, - use pandas.factorize and NumPy indexing instead. - For further details see - :ref:`Looking up values by index/column labels <indexing.lookup>`. - - Given equal-length arrays of row and column labels, return an - array of the values corresponding to each (row, col) pair. - - Parameters - ---------- - row_labels : sequence - The row labels to use for lookup. - col_labels : sequence - The column labels to use for lookup. - - Returns - ------- - numpy.ndarray - The found values. - """ - msg = ( - "The 'lookup' method is deprecated and will be " - "removed in a future version. " - "You can use DataFrame.melt and DataFrame.loc " - "as a substitute." - ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) - - n = len(row_labels) - if n != len(col_labels): - raise ValueError("Row labels must have same size as column labels") - if not (self.index.is_unique and self.columns.is_unique): - # GH#33041 - raise ValueError("DataFrame.lookup requires unique index and columns") - - thresh = 1000 - if not self._is_mixed_type or n > thresh: - values = self.values - ridx = self.index.get_indexer(row_labels) - cidx = self.columns.get_indexer(col_labels) - if (ridx == -1).any(): - raise KeyError("One or more row labels was not found") - if (cidx == -1).any(): - raise KeyError("One or more column labels was not found") - flat_index = ridx * len(self.columns) + cidx - result = values.flat[flat_index] - else: - result = np.empty(n, dtype="O") - for i, (r, c) in enumerate(zip(row_labels, col_labels)): - result[i] = self._get_value(r, c) - - if is_object_dtype(result): - result = lib.maybe_convert_objects(result) - - return result - # ---------------------------------------------------------------------- # Reindexing and alignment @@ -9562,118 +9499,6 @@ def infer(x): # ---------------------------------------------------------------------- # Merging / joining methods - def append( - self, - other, - ignore_index: bool = 
False, - verify_integrity: bool = False, - sort: bool = False, - ) -> DataFrame: - """ - Append rows of `other` to the end of caller, returning a new object. - - .. deprecated:: 1.4.0 - Use :func:`concat` instead. For further details see - :ref:`whatsnew_140.deprecations.frame_series_append` - - Columns in `other` that are not in the caller are added as new columns. - - Parameters - ---------- - other : DataFrame or Series/dict-like object, or list of these - The data to append. - ignore_index : bool, default False - If True, the resulting axis will be labeled 0, 1, …, n - 1. - verify_integrity : bool, default False - If True, raise ValueError on creating index with duplicates. - sort : bool, default False - Sort columns if the columns of `self` and `other` are not aligned. - - .. versionchanged:: 1.0.0 - - Changed to not sort by default. - - Returns - ------- - DataFrame - A new DataFrame consisting of the rows of caller and the rows of `other`. - - See Also - -------- - concat : General function to concatenate DataFrame or Series objects. - - Notes - ----- - If a list of dict/series is passed and the keys are all contained in - the DataFrame's index, the order of the columns in the resulting - DataFrame will be unchanged. - - Iteratively appending rows to a DataFrame can be more computationally - intensive than a single concatenate. A better solution is to append - those rows to a list and then concatenate the list with the original - DataFrame all at once. 
- - Examples - -------- - >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'), index=['x', 'y']) - >>> df - A B - x 1 2 - y 3 4 - >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'), index=['x', 'y']) - >>> df.append(df2) - A B - x 1 2 - y 3 4 - x 5 6 - y 7 8 - - With `ignore_index` set to True: - - >>> df.append(df2, ignore_index=True) - A B - 0 1 2 - 1 3 4 - 2 5 6 - 3 7 8 - - The following, while not recommended methods for generating DataFrames, - show two ways to generate a DataFrame from multiple data sources. - - Less efficient: - - >>> df = pd.DataFrame(columns=['A']) - >>> for i in range(5): - ... df = df.append({'A': i}, ignore_index=True) - >>> df - A - 0 0 - 1 1 - 2 2 - 3 3 - 4 4 - - More efficient: - - >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)], - ... ignore_index=True) - A - 0 0 - 1 1 - 2 2 - 3 3 - 4 4 - """ - warnings.warn( - "The frame.append method is deprecated " - "and will be removed from pandas in a future version. " - "Use pandas.concat instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - - return self._append(other, ignore_index, verify_integrity, sort) - def _append( self, other, diff --git a/pandas/core/series.py b/pandas/core/series.py index bba225bb91caf..8e2234a9e6f88 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2976,92 +2976,6 @@ def searchsorted( # type: ignore[override] # ------------------------------------------------------------------- # Combination - def append( - self, to_append, ignore_index: bool = False, verify_integrity: bool = False - ) -> Series: - """ - Concatenate two or more Series. - - .. deprecated:: 1.4.0 - Use :func:`concat` instead. For further details see - :ref:`whatsnew_140.deprecations.frame_series_append` - - Parameters - ---------- - to_append : Series or list/tuple of Series - Series to append with self. - ignore_index : bool, default False - If True, the resulting axis will be labeled 0, 1, …, n - 1. 
- verify_integrity : bool, default False - If True, raise Exception on creating index with duplicates. - - Returns - ------- - Series - Concatenated Series. - - See Also - -------- - concat : General function to concatenate DataFrame or Series objects. - - Notes - ----- - Iteratively appending to a Series can be more computationally intensive - than a single concatenate. A better solution is to append values to a - list and then concatenate the list with the original Series all at - once. - - Examples - -------- - >>> s1 = pd.Series([1, 2, 3]) - >>> s2 = pd.Series([4, 5, 6]) - >>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5]) - >>> s1.append(s2) - 0 1 - 1 2 - 2 3 - 0 4 - 1 5 - 2 6 - dtype: int64 - - >>> s1.append(s3) - 0 1 - 1 2 - 2 3 - 3 4 - 4 5 - 5 6 - dtype: int64 - - With `ignore_index` set to True: - - >>> s1.append(s2, ignore_index=True) - 0 1 - 1 2 - 2 3 - 3 4 - 4 5 - 5 6 - dtype: int64 - - With `verify_integrity` set to True: - - >>> s1.append(s2, verify_integrity=True) - Traceback (most recent call last): - ... - ValueError: Indexes have overlapping values: [0, 1, 2] - """ - warnings.warn( - "The series.append method is deprecated " - "and will be removed from pandas in a future version. 
" - "Use pandas.concat instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - - return self._append(to_append, ignore_index, verify_integrity) - def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): diff --git a/pandas/tests/frame/indexing/test_lookup.py b/pandas/tests/frame/indexing/test_lookup.py deleted file mode 100644 index caab5feea853b..0000000000000 --- a/pandas/tests/frame/indexing/test_lookup.py +++ /dev/null @@ -1,94 +0,0 @@ -import numpy as np -import pytest - -from pandas import ( - DataFrame, - Series, -) -import pandas._testing as tm - - -class TestLookup: - def test_lookup_float(self, float_frame): - df = float_frame - rows = list(df.index) * len(df.columns) - cols = list(df.columns) * len(df.index) - with tm.assert_produces_warning(FutureWarning): - result = df.lookup(rows, cols) - - expected = np.array([df.loc[r, c] for r, c in zip(rows, cols)]) - tm.assert_numpy_array_equal(result, expected) - - def test_lookup_mixed(self, float_string_frame): - df = float_string_frame - rows = list(df.index) * len(df.columns) - cols = list(df.columns) * len(df.index) - with tm.assert_produces_warning(FutureWarning): - result = df.lookup(rows, cols) - - expected = np.array( - [df.loc[r, c] for r, c in zip(rows, cols)], dtype=np.object_ - ) - tm.assert_almost_equal(result, expected) - - def test_lookup_bool(self): - df = DataFrame( - { - "label": ["a", "b", "a", "c"], - "mask_a": [True, True, False, True], - "mask_b": [True, False, False, False], - "mask_c": [False, True, False, True], - } - ) - with tm.assert_produces_warning(FutureWarning): - df["mask"] = df.lookup(df.index, "mask_" + df["label"]) - - exp_mask = np.array( - [df.loc[r, c] for r, c in zip(df.index, "mask_" + df["label"])] - ) - - tm.assert_series_equal(df["mask"], Series(exp_mask, name="mask")) - assert df["mask"].dtype == np.bool_ - - def test_lookup_raises(self, float_frame): - with pytest.raises(KeyError, match="'One or more row labels was not 
found'"): - with tm.assert_produces_warning(FutureWarning): - float_frame.lookup(["xyz"], ["A"]) - - with pytest.raises(KeyError, match="'One or more column labels was not found'"): - with tm.assert_produces_warning(FutureWarning): - float_frame.lookup([float_frame.index[0]], ["xyz"]) - - with pytest.raises(ValueError, match="same size"): - with tm.assert_produces_warning(FutureWarning): - float_frame.lookup(["a", "b", "c"], ["a"]) - - def test_lookup_requires_unique_axes(self): - # GH#33041 raise with a helpful error message - df = DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "A"]) - - rows = [0, 1] - cols = ["A", "A"] - - # homogeneous-dtype case - with pytest.raises(ValueError, match="requires unique index and columns"): - with tm.assert_produces_warning(FutureWarning): - df.lookup(rows, cols) - with pytest.raises(ValueError, match="requires unique index and columns"): - with tm.assert_produces_warning(FutureWarning): - df.T.lookup(cols, rows) - - # heterogeneous dtype - df["B"] = 0 - with pytest.raises(ValueError, match="requires unique index and columns"): - with tm.assert_produces_warning(FutureWarning): - df.lookup(rows, cols) - - -def test_lookup_deprecated(): - # GH#18262 - df = DataFrame( - {"col": ["A", "A", "B", "B"], "A": [80, 23, np.nan, 22], "B": [80, 55, 76, 67]} - ) - with tm.assert_produces_warning(FutureWarning): - df.lookup(df.index, df["col"]) diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py deleted file mode 100644 index 54d7d95ed4570..0000000000000 --- a/pandas/tests/frame/methods/test_append.py +++ /dev/null @@ -1,287 +0,0 @@ -import numpy as np -import pytest - -import pandas as pd -from pandas import ( - DataFrame, - Series, - Timestamp, - date_range, - timedelta_range, -) -import pandas._testing as tm - - -class TestDataFrameAppend: - @pytest.mark.filterwarnings("ignore:.*append method is deprecated.*:FutureWarning") - def test_append_multiindex(self, 
multiindex_dataframe_random_data, frame_or_series): - obj = multiindex_dataframe_random_data - obj = tm.get_obj(obj, frame_or_series) - - a = obj[:5] - b = obj[5:] - - result = a.append(b) - tm.assert_equal(result, obj) - - def test_append_empty_list(self): - # GH 28769 - df = DataFrame() - result = df._append([]) - expected = df - tm.assert_frame_equal(result, expected) - assert result is not df - - df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) - result = df._append([]) - expected = df - tm.assert_frame_equal(result, expected) - assert result is not df # ._append() should return a new object - - def test_append_series_dict(self): - df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) - - series = df.loc[4] - msg = "Indexes have overlapping values" - with pytest.raises(ValueError, match=msg): - df._append(series, verify_integrity=True) - - series.name = None - msg = "Can only append a Series if ignore_index=True" - with pytest.raises(TypeError, match=msg): - df._append(series, verify_integrity=True) - - result = df._append(series[::-1], ignore_index=True) - expected = df._append( - DataFrame({0: series[::-1]}, index=df.columns).T, ignore_index=True - ) - tm.assert_frame_equal(result, expected) - - # dict - result = df._append(series.to_dict(), ignore_index=True) - tm.assert_frame_equal(result, expected) - - result = df._append(series[::-1][:3], ignore_index=True) - expected = df._append( - DataFrame({0: series[::-1][:3]}).T, ignore_index=True, sort=True - ) - tm.assert_frame_equal(result, expected.loc[:, result.columns]) - - msg = "Can only append a dict if ignore_index=True" - with pytest.raises(TypeError, match=msg): - df._append(series.to_dict()) - - # can append when name set - row = df.loc[4] - row.name = 5 - result = df._append(row) - expected = df._append(df[-1:], ignore_index=True) - tm.assert_frame_equal(result, expected) - - def test_append_list_of_series_dicts(self): - df = DataFrame(np.random.randn(5, 
4), columns=["foo", "bar", "baz", "qux"]) - - dicts = [x.to_dict() for idx, x in df.iterrows()] - - result = df._append(dicts, ignore_index=True) - expected = df._append(df, ignore_index=True) - tm.assert_frame_equal(result, expected) - - # different columns - dicts = [ - {"foo": 1, "bar": 2, "baz": 3, "peekaboo": 4}, - {"foo": 5, "bar": 6, "baz": 7, "peekaboo": 8}, - ] - result = df._append(dicts, ignore_index=True, sort=True) - expected = df._append(DataFrame(dicts), ignore_index=True, sort=True) - tm.assert_frame_equal(result, expected) - - def test_append_list_retain_index_name(self): - df = DataFrame( - [[1, 2], [3, 4]], index=pd.Index(["a", "b"], name="keepthisname") - ) - - serc = Series([5, 6], name="c") - - expected = DataFrame( - [[1, 2], [3, 4], [5, 6]], - index=pd.Index(["a", "b", "c"], name="keepthisname"), - ) - - # append series - result = df._append(serc) - tm.assert_frame_equal(result, expected) - - # append list of series - result = df._append([serc]) - tm.assert_frame_equal(result, expected) - - def test_append_missing_cols(self): - # GH22252 - # exercise the conditional branch in append method where the data - # to be appended is a list and does not contain all columns that are in - # the target DataFrame - df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) - - dicts = [{"foo": 9}, {"bar": 10}] - result = df._append(dicts, ignore_index=True, sort=True) - - expected = df._append(DataFrame(dicts), ignore_index=True, sort=True) - tm.assert_frame_equal(result, expected) - - def test_append_empty_dataframe(self): - - # Empty df append empty df - df1 = DataFrame() - df2 = DataFrame() - result = df1._append(df2) - expected = df1.copy() - tm.assert_frame_equal(result, expected) - - # Non-empty df append empty df - df1 = DataFrame(np.random.randn(5, 2)) - df2 = DataFrame() - result = df1._append(df2) - expected = df1.copy() - tm.assert_frame_equal(result, expected) - - # Empty df with columns append empty df - df1 = 
DataFrame(columns=["bar", "foo"]) - df2 = DataFrame() - result = df1._append(df2) - expected = df1.copy() - tm.assert_frame_equal(result, expected) - - # Non-Empty df with columns append empty df - df1 = DataFrame(np.random.randn(5, 2), columns=["bar", "foo"]) - df2 = DataFrame() - result = df1._append(df2) - expected = df1.copy() - tm.assert_frame_equal(result, expected) - - def test_append_dtypes(self, using_array_manager): - - # GH 5754 - # row appends of different dtypes (so need to do by-item) - # can sometimes infer the correct type - - df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(5)) - df2 = DataFrame() - result = df1._append(df2) - expected = df1.copy() - tm.assert_frame_equal(result, expected) - - df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) - df2 = DataFrame({"bar": "foo"}, index=range(1, 2)) - result = df1._append(df2) - expected = DataFrame({"bar": [Timestamp("20130101"), "foo"]}) - tm.assert_frame_equal(result, expected) - - df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) - df2 = DataFrame({"bar": np.nan}, index=range(1, 2)) - result = df1._append(df2) - expected = DataFrame( - {"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")} - ) - if using_array_manager: - # TODO(ArrayManager) decide on exact casting rules in concat - # With ArrayManager, all-NaN float is not ignored - expected = expected.astype(object) - tm.assert_frame_equal(result, expected) - - df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) - df2 = DataFrame({"bar": np.nan}, index=range(1, 2), dtype=object) - result = df1._append(df2) - expected = DataFrame( - {"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")} - ) - if using_array_manager: - # With ArrayManager, all-NaN float is not ignored - expected = expected.astype(object) - tm.assert_frame_equal(result, expected) - - df1 = DataFrame({"bar": np.nan}, index=range(1)) - df2 = DataFrame({"bar": Timestamp("20130101")}, index=range(1, 2)) - result = 
df1._append(df2) - expected = DataFrame( - {"bar": Series([np.nan, Timestamp("20130101")], dtype="M8[ns]")} - ) - if using_array_manager: - # With ArrayManager, all-NaN float is not ignored - expected = expected.astype(object) - tm.assert_frame_equal(result, expected) - - df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) - df2 = DataFrame({"bar": 1}, index=range(1, 2), dtype=object) - result = df1._append(df2) - expected = DataFrame({"bar": Series([Timestamp("20130101"), 1])}) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "timestamp", ["2019-07-19 07:04:57+0100", "2019-07-19 07:04:57"] - ) - def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp): - # GH 30238 - tz = tz_naive_fixture - df = DataFrame([Timestamp(timestamp, tz=tz)]) - result = df._append(df.iloc[0]).iloc[-1] - expected = Series(Timestamp(timestamp, tz=tz), name=0) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize( - "data, dtype", - [ - ([1], pd.Int64Dtype()), - ([1], pd.CategoricalDtype()), - ([pd.Interval(left=0, right=5)], pd.IntervalDtype()), - ([pd.Period("2000-03", freq="M")], pd.PeriodDtype("M")), - ([1], pd.SparseDtype()), - ], - ) - def test_other_dtypes(self, data, dtype, using_array_manager): - df = DataFrame(data, dtype=dtype) - - result = df._append(df.iloc[0]).iloc[-1] - - expected = Series(data, name=0, dtype=dtype) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"]) - def test_append_numpy_bug_1681(self, dtype): - # another datetime64 bug - if dtype == "datetime64[ns]": - index = date_range("2011/1/1", "2012/1/1", freq="W-FRI") - else: - index = timedelta_range("1 days", "10 days", freq="2D") - - df = DataFrame() - other = DataFrame({"A": "foo", "B": index}, index=index) - - result = df._append(other) - assert (result["B"] == index).all() - - @pytest.mark.filterwarnings("ignore:The values in the array:RuntimeWarning") - def 
test_multiindex_column_append_multiple(self): - # GH 29699 - df = DataFrame( - [[1, 11], [2, 12], [3, 13]], - columns=pd.MultiIndex.from_tuples( - [("multi", "col1"), ("multi", "col2")], names=["level1", None] - ), - ) - df2 = df.copy() - for i in range(1, 10): - df[i, "colA"] = 10 - df = df._append(df2, ignore_index=True) - result = df["multi"] - expected = DataFrame( - {"col1": [1, 2, 3] * (i + 1), "col2": [11, 12, 13] * (i + 1)} - ) - tm.assert_frame_equal(result, expected) - - def test_append_raises_future_warning(self): - # GH#35407 - df1 = DataFrame([[1, 2], [3, 4]]) - df2 = DataFrame([[5, 6], [7, 8]]) - with tm.assert_produces_warning(FutureWarning): - df1.append(df2) diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py index 7634f783117d6..689caffe98a2d 100644 --- a/pandas/tests/generic/test_finalize.py +++ b/pandas/tests/generic/test_finalize.py @@ -161,26 +161,6 @@ pytest.param( (pd.DataFrame, frame_data, operator.methodcaller("applymap", lambda x: x)) ), - pytest.param( - ( - pd.DataFrame, - frame_data, - operator.methodcaller("append", pd.DataFrame({"A": [1]})), - ), - marks=pytest.mark.filterwarnings( - "ignore:.*append method is deprecated.*:FutureWarning" - ), - ), - pytest.param( - ( - pd.DataFrame, - frame_data, - operator.methodcaller("append", pd.DataFrame({"B": [1]})), - ), - marks=pytest.mark.filterwarnings( - "ignore:.*append method is deprecated.*:FutureWarning" - ), - ), pytest.param( ( pd.DataFrame, diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index fa9cf5215c0f7..eda27787afe1c 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -84,7 +84,6 @@ def test_getitem_setitem_ellipsis(): assert (result == 5).all() -@pytest.mark.filterwarnings("ignore:.*append method is deprecated.*:FutureWarning") @pytest.mark.parametrize( "result_1, duplicate_item, expected_1", [ @@ -102,8 +101,8 @@ def 
test_getitem_setitem_ellipsis(): ) def test_getitem_with_duplicates_indices(result_1, duplicate_item, expected_1): # GH 17610 - result = result_1.append(duplicate_item) - expected = expected_1.append(duplicate_item) + result = result_1._append(duplicate_item) + expected = expected_1._append(duplicate_item) tm.assert_series_equal(result[1], expected) assert result[2] == result_1[2] diff --git a/pandas/tests/series/methods/test_append.py b/pandas/tests/series/methods/test_append.py deleted file mode 100644 index 6f8852ade6408..0000000000000 --- a/pandas/tests/series/methods/test_append.py +++ /dev/null @@ -1,271 +0,0 @@ -import numpy as np -import pytest - -import pandas as pd -from pandas import ( - DataFrame, - DatetimeIndex, - Index, - Series, - Timestamp, - date_range, -) -import pandas._testing as tm - - -class TestSeriesAppend: - def test_append_preserve_name(self, datetime_series): - result = datetime_series[:5]._append(datetime_series[5:]) - assert result.name == datetime_series.name - - def test_append(self, datetime_series, string_series, object_series): - appended_series = string_series._append(object_series) - for idx, value in appended_series.items(): - if idx in string_series.index: - assert value == string_series[idx] - elif idx in object_series.index: - assert value == object_series[idx] - else: - raise AssertionError("orphaned index!") - - msg = "Indexes have overlapping values:" - with pytest.raises(ValueError, match=msg): - datetime_series._append(datetime_series, verify_integrity=True) - - def test_append_many(self, datetime_series): - pieces = [datetime_series[:5], datetime_series[5:10], datetime_series[10:]] - - result = pieces[0]._append(pieces[1:]) - tm.assert_series_equal(result, datetime_series) - - def test_append_duplicates(self): - # GH 13677 - s1 = Series([1, 2, 3]) - s2 = Series([4, 5, 6]) - exp = Series([1, 2, 3, 4, 5, 6], index=[0, 1, 2, 0, 1, 2]) - tm.assert_series_equal(s1._append(s2), exp) - tm.assert_series_equal(pd.concat([s1, 
s2]), exp) - - # the result must have RangeIndex - exp = Series([1, 2, 3, 4, 5, 6]) - tm.assert_series_equal( - s1._append(s2, ignore_index=True), exp, check_index_type=True - ) - tm.assert_series_equal( - pd.concat([s1, s2], ignore_index=True), exp, check_index_type=True - ) - - msg = "Indexes have overlapping values:" - with pytest.raises(ValueError, match=msg): - s1._append(s2, verify_integrity=True) - with pytest.raises(ValueError, match=msg): - pd.concat([s1, s2], verify_integrity=True) - - def test_append_tuples(self): - # GH 28410 - s = Series([1, 2, 3]) - list_input = [s, s] - tuple_input = (s, s) - - expected = s._append(list_input) - result = s._append(tuple_input) - - tm.assert_series_equal(expected, result) - - def test_append_dataframe_raises(self): - # GH 31413 - df = DataFrame({"A": [1, 2], "B": [3, 4]}) - - msg = "to_append should be a Series or list/tuple of Series, got DataFrame" - with pytest.raises(TypeError, match=msg): - df.A._append(df) - with pytest.raises(TypeError, match=msg): - df.A._append([df]) - - def test_append_raises_future_warning(self): - # GH#35407 - with tm.assert_produces_warning(FutureWarning): - Series([1, 2]).append(Series([3, 4])) - - -class TestSeriesAppendWithDatetimeIndex: - def test_append(self): - rng = date_range("5/8/2012 1:45", periods=10, freq="5T") - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - - result = ts._append(ts) - result_df = df._append(df) - ex_index = DatetimeIndex(np.tile(rng.values, 2)) - tm.assert_index_equal(result.index, ex_index) - tm.assert_index_equal(result_df.index, ex_index) - - appended = rng.append(rng) - tm.assert_index_equal(appended, ex_index) - - appended = rng.append([rng, rng]) - ex_index = DatetimeIndex(np.tile(rng.values, 3)) - tm.assert_index_equal(appended, ex_index) - - # different index names - rng1 = rng.copy() - rng2 = rng.copy() - rng1.name = "foo" - rng2.name = "bar" - - assert rng1.append(rng1).name == "foo" - assert 
rng1.append(rng2).name is None - - def test_append_tz(self): - # see gh-2938 - rng = date_range("5/8/2012 1:45", periods=10, freq="5T", tz="US/Eastern") - rng2 = date_range("5/8/2012 2:35", periods=10, freq="5T", tz="US/Eastern") - rng3 = date_range("5/8/2012 1:45", periods=20, freq="5T", tz="US/Eastern") - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - ts2 = Series(np.random.randn(len(rng2)), rng2) - df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) - - result = ts._append(ts2) - result_df = df._append(df2) - tm.assert_index_equal(result.index, rng3) - tm.assert_index_equal(result_df.index, rng3) - - appended = rng.append(rng2) - tm.assert_index_equal(appended, rng3) - - def test_append_tz_explicit_pytz(self): - # see gh-2938 - from pytz import timezone as timezone - - rng = date_range( - "5/8/2012 1:45", periods=10, freq="5T", tz=timezone("US/Eastern") - ) - rng2 = date_range( - "5/8/2012 2:35", periods=10, freq="5T", tz=timezone("US/Eastern") - ) - rng3 = date_range( - "5/8/2012 1:45", periods=20, freq="5T", tz=timezone("US/Eastern") - ) - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - ts2 = Series(np.random.randn(len(rng2)), rng2) - df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) - - result = ts._append(ts2) - result_df = df._append(df2) - tm.assert_index_equal(result.index, rng3) - tm.assert_index_equal(result_df.index, rng3) - - appended = rng.append(rng2) - tm.assert_index_equal(appended, rng3) - - def test_append_tz_dateutil(self): - # see gh-2938 - rng = date_range( - "5/8/2012 1:45", periods=10, freq="5T", tz="dateutil/US/Eastern" - ) - rng2 = date_range( - "5/8/2012 2:35", periods=10, freq="5T", tz="dateutil/US/Eastern" - ) - rng3 = date_range( - "5/8/2012 1:45", periods=20, freq="5T", tz="dateutil/US/Eastern" - ) - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - ts2 
= Series(np.random.randn(len(rng2)), rng2) - df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) - - result = ts._append(ts2) - result_df = df._append(df2) - tm.assert_index_equal(result.index, rng3) - tm.assert_index_equal(result_df.index, rng3) - - appended = rng.append(rng2) - tm.assert_index_equal(appended, rng3) - - def test_series_append_aware(self): - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1._append(ser2) - - exp_index = DatetimeIndex( - ["2011-01-01 01:00", "2011-01-01 02:00"], tz="US/Eastern", freq="H" - ) - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz - - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="UTC") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="UTC") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1._append(ser2) - - exp_index = DatetimeIndex( - ["2011-01-01 01:00", "2011-01-01 02:00"], tz="UTC", freq="H" - ) - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - utc = rng1.tz - assert utc == ts_result.index.tz - - # GH#7795 - # different tz coerces to object dtype, not UTC - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Central") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1._append(ser2) - exp_index = Index( - [ - Timestamp("1/1/2011 01:00", tz="US/Eastern"), - Timestamp("1/1/2011 02:00", tz="US/Central"), - ] - ) - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - - def test_series_append_aware_naive(self): - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", 
tz="US/Eastern") - ser1 = Series(np.random.randn(len(rng1)), index=rng1) - ser2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ser1._append(ser2) - - expected = ser1.index.astype(object).append(ser2.index.astype(object)) - assert ts_result.index.equals(expected) - - # mixed - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") - rng2 = range(100) - ser1 = Series(np.random.randn(len(rng1)), index=rng1) - ser2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ser1._append(ser2) - - expected = ser1.index.astype(object).append(ser2.index) - assert ts_result.index.equals(expected) - - def test_series_append_dst(self): - rng1 = date_range("1/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") - rng2 = date_range("8/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") - ser1 = Series([1, 2, 3], index=rng1) - ser2 = Series([10, 11, 12], index=rng2) - ts_result = ser1._append(ser2) - - exp_index = DatetimeIndex( - [ - "2016-01-01 01:00", - "2016-01-01 02:00", - "2016-01-01 03:00", - "2016-08-01 01:00", - "2016-08-01 02:00", - "2016-08-01 03:00", - ], - tz="US/Eastern", - ) - exp = Series([1, 2, 3, 10, 11, 12], index=exp_index) - tm.assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49481
2022-11-02T20:20:15Z
2022-11-04T20:30:43Z
2022-11-04T20:30:43Z
2022-11-04T20:49:34Z
enable pylint: consider-using-sys-exit
diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index b66631a7d943e..ba1c1cbea26f9 100644 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -327,7 +327,7 @@ def write_legacy_file(): sys.path.insert(0, ".") if not 3 <= len(sys.argv) <= 4: - exit( + sys.exit( "Specify output directory and storage type: generate_legacy_" "storage_files.py <output_dir> <storage_type> " ) @@ -338,7 +338,7 @@ def write_legacy_file(): if storage_type == "pickle": write_legacy_pickles(output_dir=output_dir) else: - exit("storage_type must be one of {'pickle'}") + sys.exit("storage_type must be one of {'pickle'}") if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index 761f3c687d08d..97f0df8495e2e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -107,7 +107,6 @@ disable = [ "comparison-with-itself", "consider-merging-isinstance", "consider-using-min-builtin", - "consider-using-sys-exit", "consider-using-ternary", "consider-using-with", "cyclic-import",
Issue #48855. This PR enables pylint type "R" warning: `consider-using-sys-exit`. - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/49480
2022-11-02T19:42:06Z
2022-11-03T16:30:45Z
2022-11-03T16:30:45Z
2022-11-03T16:31:02Z
DEPR Series[td64].fillna(incompatible)
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index b7995dca0a825..7a4db76a3e046 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -408,6 +408,7 @@ Removal of prior version deprecations/changes - Changed behavior of setitem-like operations (``__setitem__``, ``fillna``, ``where``, ``mask``, ``replace``, ``insert``, fill_value for ``shift``) on an object with :class:`DatetimeTZDtype` when using a value with a non-matching timezone, the value will be cast to the object's timezone instead of casting both to object-dtype (:issue:`44243`) - Changed behavior of :class:`Index`, :class:`Series`, :class:`DataFrame` constructors with floating-dtype data and a :class:`DatetimeTZDtype`, the data are now interpreted as UTC-times instead of wall-times, consistent with how integer-dtype data are treated (:issue:`45573`) - Removed the deprecated ``base`` and ``loffset`` arguments from :meth:`pandas.DataFrame.resample`, :meth:`pandas.Series.resample` and :class:`pandas.Grouper`. Use ``offset`` or ``origin`` instead (:issue:`31809`) +- Changed behavior of :meth:`Series.fillna` and :meth:`DataFrame.fillna` with ``timedelta64[ns]`` dtype and an incompatible ``fill_value``; this now casts to ``object`` dtype instead of raising, consistent with the behavior with other dtypes (:issue:`45746`) - Change the default argument of ``regex`` for :meth:`Series.str.replace` from ``True`` to ``False``. Additionally, a single character ``pat`` with ``regex=True`` is now treated as a regular expression instead of a string literal. 
(:issue:`36695`, :issue:`24804`) - Changed behavior of :meth:`DataFrame.any` and :meth:`DataFrame.all` with ``bool_only=True``; object-dtype columns with all-bool values will no longer be included, manually cast to ``bool`` dtype first (:issue:`46188`) - Changed behavior of comparison of a :class:`Timestamp` with a ``datetime.date`` object; these now compare as un-equal and raise on inequality comparisons, matching the ``datetime.datetime`` behavior (:issue:`36131`) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 5fb82f1f9b72d..f06d118538c1a 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -4,7 +4,6 @@ Any, Hashable, ) -import warnings import numpy as np @@ -18,7 +17,6 @@ cache_readonly, doc, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 4cc8a12de6821..5013ace4b8af3 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -11,7 +11,6 @@ cast, final, ) -import warnings import numpy as np @@ -36,7 +35,6 @@ ) from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly -from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.astype import astype_array_safe @@ -1545,35 +1543,6 @@ def putmask(self, mask, new) -> list[Block]: return [self] - def fillna( - self, value, limit: int | None = None, inplace: bool = False, downcast=None - ) -> list[Block]: - # Caller is responsible for validating limit; if int it is strictly positive - - if self.dtype.kind == "m": - try: - res_values = self.values.fillna(value, limit=limit) - except (ValueError, TypeError): - # GH#45746 - warnings.warn( - "The behavior of fillna with timedelta64[ns] dtype and " - f"an incompatible value ({type(value)}) is deprecated. 
" - "In a future version, this will cast to a common dtype " - "(usually object) instead of raising, matching the " - "behavior of other dtypes.", - FutureWarning, - stacklevel=find_stack_level(), - ) - raise - else: - res_blk = self.make_block(res_values) - return [res_blk] - - # TODO: since this now dispatches to super, which in turn dispatches - # to putmask, it may *actually* respect 'inplace=True'. If so, add - # tests for this. - return super().fillna(value, limit=limit, inplace=inplace, downcast=downcast) - def delete(self, loc) -> Block: # This will be unnecessary if/when __array_function__ is implemented values = self.values.delete(loc) diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py index caa14a440d04c..2444c8e67bf4b 100644 --- a/pandas/tests/series/methods/test_fillna.py +++ b/pandas/tests/series/methods/test_fillna.py @@ -243,13 +243,12 @@ def test_timedelta_fillna(self, frame_or_series): expected = frame_or_series(expected) tm.assert_equal(result, expected) - # interpreted as seconds, no longer supported - msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'" - wmsg = "In a future version, this will cast to a common dtype" - with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=wmsg): - # GH#45746 - obj.fillna(1) + # GH#45746 pre-1.? ints were interpreted as seconds. then that was + # deprecated and changed to raise. In 2.0 it casts to common dtype, + # consistent with every other dtype's behavior + res = obj.fillna(1) + expected = obj.astype(object).fillna(1) + tm.assert_equal(res, expected) result = obj.fillna(Timedelta(seconds=1)) expected = Series(
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49479
2022-11-02T18:47:40Z
2022-11-04T17:20:39Z
2022-11-04T17:20:39Z
2022-11-04T17:24:29Z
CLN use default_index more
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index c6b5816d12061..5d9732deeae4a 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -447,6 +447,7 @@ Performance improvements - Performance improvements to :func:`read_sas` (:issue:`47403`, :issue:`47405`, :issue:`47656`, :issue:`48502`) - Memory improvement in :meth:`RangeIndex.sort_values` (:issue:`48801`) - Performance improvement in :class:`DataFrameGroupBy` and :class:`SeriesGroupBy` when ``by`` is a categorical type and ``sort=False`` (:issue:`48976`) +- Performance improvement in :func:`merge` when not merging on the index - the new index will now be :class:`RangeIndex` instead of :class:`Int64Index` (:issue:`49478`) .. --------------------------------------------------------------------------- .. _whatsnew_200.bug_fixes: diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index cea9aaf70ccd0..e4e20ef98224c 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -95,6 +95,7 @@ Index, MultiIndex, all_indexes_same, + default_index, ) from pandas.core.indexes.category import CategoricalIndex from pandas.core.series import Series @@ -1159,7 +1160,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) if not self.as_index: self._insert_inaxis_grouper_inplace(result) - result.index = Index(range(len(result))) + result.index = default_index(len(result)) return result @@ -1778,7 +1779,7 @@ def nunique(self, dropna: bool = True) -> DataFrame: ) if not self.as_index: - results.index = Index(range(len(results))) + results.index = default_index(len(results)) self._insert_inaxis_grouper_inplace(results) return results diff --git a/pandas/core/reshape/encoding.py b/pandas/core/reshape/encoding.py index a39e3c1f10956..ec077caeef69e 100644 --- a/pandas/core/reshape/encoding.py +++ b/pandas/core/reshape/encoding.py @@ -21,7 +21,10 @@ from pandas.core.arrays import SparseArray from 
pandas.core.arrays.categorical import factorize_from_iterable from pandas.core.frame import DataFrame -from pandas.core.indexes.api import Index +from pandas.core.indexes.api import ( + Index, + default_index, +) from pandas.core.series import Series @@ -249,7 +252,7 @@ def get_empty_frame(data) -> DataFrame: if isinstance(data, Series): index = data.index else: - index = Index(range(len(data))) + index = default_index(len(data)) return DataFrame(index=index) # if all NaN diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 3f98ab16c6797..2600dbf249e30 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -84,6 +84,7 @@ import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.frame import _merge_doc +from pandas.core.indexes.api import default_index from pandas.core.sorting import is_int64_overflow_possible if TYPE_CHECKING: @@ -1060,7 +1061,7 @@ def _get_join_info( else: join_index = self.left.index.take(left_indexer) else: - join_index = Index(np.arange(len(left_indexer))) + join_index = default_index(len(left_indexer)) if len(join_index) == 0: join_index = join_index.astype(object)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49478
2022-11-02T16:13:52Z
2022-11-04T20:52:03Z
2022-11-04T20:52:03Z
2022-11-04T20:52:09Z
CI maybe fix arm test
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index d2c2811759e7c..1265bb469d7eb 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2231,8 +2231,10 @@ def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None): if is_float_dtype(data.dtype): # pre-2.0 we treated these as wall-times, inconsistent with ints - # GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes - data = data.astype(np.int64) + # GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes. + # Note: data.astype(np.int64) fails ARM tests, see + # https://github.com/pandas-dev/pandas/issues/49468. + data = data.astype("M8[ns]").view("i8") copy = False elif is_timedelta64_dtype(data.dtype) or is_bool_dtype(data.dtype):
- [ ] closes #49468 (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49476
2022-11-02T15:30:05Z
2022-11-02T21:00:15Z
2022-11-02T21:00:15Z
2022-11-02T21:00:24Z
TST: avoid chained assignment in tests outside of specific tests on chaining
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py index 510d4ab702fdd..83b9098f01452 100644 --- a/pandas/tests/apply/test_frame_apply.py +++ b/pandas/tests/apply/test_frame_apply.py @@ -347,7 +347,7 @@ def test_apply_yield_list(float_frame): def test_apply_reduce_Series(float_frame): - float_frame["A"].iloc[::2] = np.nan + float_frame.iloc[::2, float_frame.columns.get_loc("A")] = np.nan expected = float_frame.mean(1) result = float_frame.apply(np.mean, axis=1) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 1c08a37c58e4e..8331bed881ce1 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -1132,7 +1132,7 @@ def test_setitem_always_copy(self, float_frame): s = float_frame["A"].copy() float_frame["E"] = s - float_frame["E"][5:10] = np.nan + float_frame.iloc[5:10, float_frame.columns.get_loc("E")] = np.nan assert notna(s[5:10]).all() @pytest.mark.parametrize("consolidate", [True, False]) diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py index 25ef49718fbe7..445b90327ed2c 100644 --- a/pandas/tests/frame/methods/test_cov_corr.py +++ b/pandas/tests/frame/methods/test_cov_corr.py @@ -107,9 +107,9 @@ class TestDataFrameCorr: @pytest.mark.parametrize("method", ["pearson", "kendall", "spearman"]) @td.skip_if_no_scipy def test_corr_scipy_method(self, float_frame, method): - float_frame["A"][:5] = np.nan - float_frame["B"][5:10] = np.nan - float_frame["A"][:10] = float_frame["A"][10:20] + float_frame.loc[float_frame.index[:5], "A"] = np.nan + float_frame.loc[float_frame.index[5:10], "B"] = np.nan + float_frame.loc[float_frame.index[:10], "A"] = float_frame["A"][10:20] correls = float_frame.corr(method=method) expected = float_frame["A"].corr(float_frame["C"], method=method) diff --git a/pandas/tests/frame/methods/test_fillna.py 
b/pandas/tests/frame/methods/test_fillna.py index 869cd32aa9ef9..94831da910150 100644 --- a/pandas/tests/frame/methods/test_fillna.py +++ b/pandas/tests/frame/methods/test_fillna.py @@ -392,16 +392,16 @@ def test_fillna_datetime_columns(self): tm.assert_frame_equal(result, expected) def test_ffill(self, datetime_frame): - datetime_frame["A"][:5] = np.nan - datetime_frame["A"][-5:] = np.nan + datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan + datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan tm.assert_frame_equal( datetime_frame.ffill(), datetime_frame.fillna(method="ffill") ) def test_bfill(self, datetime_frame): - datetime_frame["A"][:5] = np.nan - datetime_frame["A"][-5:] = np.nan + datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan + datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan tm.assert_frame_equal( datetime_frame.bfill(), datetime_frame.fillna(method="bfill") @@ -467,8 +467,8 @@ def test_fillna_integer_limit(self, type): def test_fillna_inplace(self): df = DataFrame(np.random.randn(10, 4)) - df[1][:4] = np.nan - df[3][-4:] = np.nan + df.loc[:4, 1] = np.nan + df.loc[-4:, 3] = np.nan expected = df.fillna(value=0) assert expected is not df @@ -479,8 +479,8 @@ def test_fillna_inplace(self): expected = df.fillna(value={0: 0}, inplace=True) assert expected is None - df[1][:4] = np.nan - df[3][-4:] = np.nan + df.loc[:4, 1] = np.nan + df.loc[-4:, 3] = np.nan expected = df.fillna(method="ffill") assert expected is not df diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py index 7b2f7908673e3..1f5cb95885004 100644 --- a/pandas/tests/frame/methods/test_rank.py +++ b/pandas/tests/frame/methods/test_rank.py @@ -43,10 +43,10 @@ def test_rank(self, float_frame): import scipy.stats # noqa:F401 from scipy.stats import rankdata - float_frame["A"][::2] = np.nan - float_frame["B"][::3] = np.nan - float_frame["C"][::4] = np.nan - float_frame["D"][::5] = np.nan + float_frame.loc[::2, "A"] = np.nan 
+ float_frame.loc[::3, "B"] = np.nan + float_frame.loc[::4, "C"] = np.nan + float_frame.loc[::5, "D"] = np.nan ranks0 = float_frame.rank() ranks1 = float_frame.rank(1) @@ -148,10 +148,10 @@ def test_rank_na_option(self, float_frame): import scipy.stats # noqa:F401 from scipy.stats import rankdata - float_frame["A"][::2] = np.nan - float_frame["B"][::3] = np.nan - float_frame["C"][::4] = np.nan - float_frame["D"][::5] = np.nan + float_frame.loc[::2, "A"] = np.nan + float_frame.loc[::3, "B"] = np.nan + float_frame.loc[::4, "C"] = np.nan + float_frame.loc[::5, "D"] = np.nan # bottom ranks0 = float_frame.rank(na_option="bottom") diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 9eaba56a23e0f..15105ceedd0d5 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -29,8 +29,8 @@ def mix_abc() -> dict[str, list[float | str]]: class TestDataFrameReplace: def test_replace_inplace(self, datetime_frame, float_string_frame): - datetime_frame["A"][:5] = np.nan - datetime_frame["A"][-5:] = np.nan + datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan + datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan tsframe = datetime_frame.copy() return_value = tsframe.replace(np.nan, 0, inplace=True) @@ -420,16 +420,16 @@ def test_regex_replace_string_types( tm.assert_equal(result, expected) def test_replace(self, datetime_frame): - datetime_frame["A"][:5] = np.nan - datetime_frame["A"][-5:] = np.nan + datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan + datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan zero_filled = datetime_frame.replace(np.nan, -1e8) tm.assert_frame_equal(zero_filled, datetime_frame.fillna(-1e8)) tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), datetime_frame) - datetime_frame["A"][:5] = np.nan - datetime_frame["A"][-5:] = np.nan - datetime_frame["B"][:5] = -1e8 + datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan + 
datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan + datetime_frame.loc[datetime_frame.index[:5], "B"] = -1e8 # empty df = DataFrame(index=["a", "b"]) @@ -716,16 +716,16 @@ def test_replace_for_new_dtypes(self, datetime_frame): # dtypes tsframe = datetime_frame.copy().astype(np.float32) - tsframe["A"][:5] = np.nan - tsframe["A"][-5:] = np.nan + tsframe.loc[tsframe.index[:5], "A"] = np.nan + tsframe.loc[tsframe.index[-5:], "A"] = np.nan zero_filled = tsframe.replace(np.nan, -1e8) tm.assert_frame_equal(zero_filled, tsframe.fillna(-1e8)) tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), tsframe) - tsframe["A"][:5] = np.nan - tsframe["A"][-5:] = np.nan - tsframe["B"][:5] = -1e8 + tsframe.loc[tsframe.index[:5], "A"] = np.nan + tsframe.loc[tsframe.index[-5:], "A"] = np.nan + tsframe.loc[tsframe.index[:5], "B"] = -1e8 b = tsframe["B"] b[b == -1e8] = np.nan diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py index 1933278efb443..7487b2c70a264 100644 --- a/pandas/tests/frame/methods/test_to_csv.py +++ b/pandas/tests/frame/methods/test_to_csv.py @@ -35,7 +35,7 @@ def read_csv(self, path, **kwargs): def test_to_csv_from_csv1(self, float_frame, datetime_frame): with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path: - float_frame["A"][:5] = np.nan + float_frame.iloc[:5, float_frame.columns.get_loc("A")] = np.nan float_frame.to_csv(path) float_frame.to_csv(path, columns=["A", "B"]) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 5a83c4997b33c..54cd39df54c57 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -718,7 +718,8 @@ def test_constructor_defaultdict(self, float_frame): from collections import defaultdict data = {} - float_frame["B"][:10] = np.nan + float_frame.loc[: float_frame.index[10], "B"] = np.nan + for k, v in float_frame.items(): dct = defaultdict(dict) dct.update(v.to_dict()) @@ -2203,7 +2204,9 @@ 
def test_constructor_series_copy(self, float_frame): series = float_frame._series df = DataFrame({"A": series["A"]}, copy=True) - df["A"][:] = 5 + # TODO can be replaced with `df.loc[:, "A"] = 5` after deprecation about + # inplace mutation is enforced + df.loc[df.index[0] : df.index[-1], "A"] = 5 assert not (series["A"] == 5).all() diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index 307f8b7a7798f..e1fc8364a89ba 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -374,7 +374,7 @@ def test_excel_writer_context_manager(self, frame, path): def test_roundtrip(self, frame, path): frame = frame.copy() - frame["A"][:5] = np.nan + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan frame.to_excel(path, "test1") frame.to_excel(path, "test1", columns=["A", "B"]) @@ -444,7 +444,7 @@ def test_ts_frame(self, tsframe, path): def test_basics_with_nan(self, frame, path): frame = frame.copy() - frame["A"][:5] = np.nan + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan frame.to_excel(path, "test1") frame.to_excel(path, "test1", columns=["A", "B"]) frame.to_excel(path, "test1", header=False) @@ -508,7 +508,7 @@ def test_sheets(self, frame, tsframe, path): tsframe.index = index frame = frame.copy() - frame["A"][:5] = np.nan + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan frame.to_excel(path, "test1") frame.to_excel(path, "test1", columns=["A", "B"]) @@ -530,7 +530,7 @@ def test_sheets(self, frame, tsframe, path): def test_colaliases(self, frame, path): frame = frame.copy() - frame["A"][:5] = np.nan + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan frame.to_excel(path, "test1") frame.to_excel(path, "test1", columns=["A", "B"]) @@ -548,7 +548,7 @@ def test_colaliases(self, frame, path): def test_roundtrip_indexlabels(self, merge_cells, frame, path): frame = frame.copy() - frame["A"][:5] = np.nan + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan frame.to_excel(path, "test1") 
frame.to_excel(path, "test1", columns=["A", "B"]) diff --git a/pandas/tests/series/methods/test_rank.py b/pandas/tests/series/methods/test_rank.py index 3af06502a3066..3a66bf1adf25b 100644 --- a/pandas/tests/series/methods/test_rank.py +++ b/pandas/tests/series/methods/test_rank.py @@ -44,7 +44,7 @@ def test_rank(self, datetime_series): from scipy.stats import rankdata datetime_series[::2] = np.nan - datetime_series[:10][::3] = 4.0 + datetime_series[:10:3] = 4.0 ranks = datetime_series.rank() oranks = datetime_series.astype("O").rank()
Similarly as https://github.com/pandas-dev/pandas/pull/46980, now splitting off a part of the test changes in https://github.com/pandas-dev/pandas/pull/49467 that can be done separately We have specific tests about chained indexing (in `pandas/tests/indexing`, eg `test_chaining_and_caching.py`), so outside those specific indexing tests, I can think we can avoid using chained indexing (regardless of the new CoW implementation, this would follow our own recommendation on best indexing practices, although it also shows that some cases of mixed positional/label based setting is somewhat convoluted ..)
https://api.github.com/repos/pandas-dev/pandas/pulls/49474
2022-11-02T14:06:57Z
2022-11-03T16:36:30Z
2022-11-03T16:36:30Z
2022-11-03T16:58:55Z
DEPR: enforce passing non boolean sort in concat
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 14b4df286d989..d0a07e58a925d 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -191,6 +191,7 @@ Removal of prior version deprecations/changes - Enforced deprecation disallowing unit-less "datetime64" dtype in :meth:`Series.astype` and :meth:`DataFrame.astype` (:issue:`47844`) - Enforced deprecation disallowing using ``.astype`` to convert a ``datetime64[ns]`` :class:`Series`, :class:`DataFrame`, or :class:`DatetimeIndex` to timezone-aware dtype, use ``obj.tz_localize`` or ``ser.dt.tz_localize`` instead (:issue:`39258`) - Enforced deprecation disallowing using ``.astype`` to convert a timezone-aware :class:`Series`, :class:`DataFrame`, or :class:`DatetimeIndex` to timezone-naive ``datetime64[ns]`` dtype, use ``obj.tz_localize(None)`` or ``obj.tz_convert("UTC").tz_localize(None)`` instead (:issue:`39258`) +- Enforced deprecation disallowing passing non boolean argument to sort in :func:`concat` (:issue:`44629`) - Removed Date parser functions :func:`~pandas.io.date_converters.parse_date_time`, :func:`~pandas.io.date_converters.parse_date_fields`, :func:`~pandas.io.date_converters.parse_all_fields` and :func:`~pandas.io.date_converters.generic_parser` (:issue:`24518`) diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 31b6209855561..5ce69d2c2ab4c 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -14,7 +14,6 @@ cast, overload, ) -import warnings import numpy as np @@ -24,7 +23,6 @@ HashableT, ) from pandas.util._decorators import cache_readonly -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( @@ -551,11 +549,8 @@ def __init__( self.levels = levels if not is_bool(sort): - warnings.warn( - "Passing non boolean values for sort is deprecated and " - "will error in a future version!", - FutureWarning, 
- stacklevel=find_stack_level(), + raise ValueError( + f"The 'sort' keyword only accepts boolean values; {sort} was passed." ) self.sort = sort diff --git a/pandas/tests/reshape/concat/test_sort.py b/pandas/tests/reshape/concat/test_sort.py index e83880625f3d6..2724f81958893 100644 --- a/pandas/tests/reshape/concat/test_sort.py +++ b/pandas/tests/reshape/concat/test_sort.py @@ -1,4 +1,5 @@ import numpy as np +import pytest import pandas as pd from pandas import DataFrame @@ -109,8 +110,9 @@ def test_concat_frame_with_sort_false(self): ) tm.assert_frame_equal(result, expected) - def test_concat_sort_none_warning(self): + def test_concat_sort_none_raises(self): # GH#41518 df = DataFrame({1: [1, 2], "a": [3, 4]}) - with tm.assert_produces_warning(FutureWarning, match="sort"): + msg = "The 'sort' keyword only accepts boolean values; None was passed." + with pytest.raises(ValueError, match=msg): pd.concat([df, df], sort=None)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v2.0.0.rst` file if fixing a bug or adding a new feature. introduced in #44629
https://api.github.com/repos/pandas-dev/pandas/pulls/49472
2022-11-02T11:45:42Z
2022-11-02T17:23:31Z
2022-11-02T17:23:31Z
2022-11-03T01:37:57Z
Backport PR #49460 on branch 1.5.x ((🎁) add python 3.11 to sdist.yml)
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index 14cede7bc1a39..7c20545105009 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -30,7 +30,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10"] + python-version: ["3.8", "3.9", "3.10", "3.11"] concurrency: # https://github.community/t/concurrecy-not-work-for-push/183068/7 group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{matrix.python-version}}-sdist @@ -42,7 +42,7 @@ jobs: fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} @@ -86,6 +86,8 @@ jobs: pip install numpy==1.20.3 ;; 3.10) pip install numpy==1.21.2 ;; + 3.11) + pip install numpy==1.23.2 ;; esac - name: Import pandas
Backport PR #49460: (🎁) add python 3.11 to sdist.yml
https://api.github.com/repos/pandas-dev/pandas/pulls/49471
2022-11-02T10:41:59Z
2022-11-02T14:06:44Z
2022-11-02T14:06:44Z
2022-11-02T14:06:45Z
STYLE Suppressing two instances of pylint W0703 broad-except.
diff --git a/pandas/_testing/_io.py b/pandas/_testing/_io.py index 9ad0d13e7317e..bef2730ec630b 100644 --- a/pandas/_testing/_io.py +++ b/pandas/_testing/_io.py @@ -225,7 +225,7 @@ def wrapper(*args, **kwargs): ) try: return t(*args, **kwargs) - except Exception as err: + except Exception as err: # pylint: disable=broad-except errno = getattr(err, "errno", None) if not errno and hasattr(errno, "reason"): # error: "Exception" has no attribute "reason" diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 54fa9629fecd4..4426be266bb69 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -494,7 +494,7 @@ def maybe_cast_to_extension_array( try: result = cls._from_sequence(obj, dtype=dtype) - except Exception: + except Exception: # pylint: disable=broad-except # We can't predict what downstream EA constructors may raise result = obj return result
- [x] partially addresses ability to re-enable pylint checks for `broad-except` from #48855 34 instances of W0703 were found, this should address 2. - Suppressed one warning where there is existing logic to parse returned errors - Suppressed another warning because downstream constructors could raise any error Working on appropriate fixes for the rest and the eventual update to `pyproject.toml` -- will submit separate PRs as I go along.
https://api.github.com/repos/pandas-dev/pandas/pulls/49470
2022-11-02T10:33:09Z
2022-11-02T17:26:55Z
2022-11-02T17:26:55Z
2022-11-02T17:27:09Z
BUG / CoW: also return new object in case of null slice for both rows and columns (.(i)loc[:, :])
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 57558491e0e57..6779d47792dbc 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -405,6 +405,10 @@ Other API changes - Files are now closed when encountering an exception in :func:`read_json` (:issue:`49921`) - Changed behavior of :func:`read_csv`, :func:`read_json` & :func:`read_fwf`, where the index will now always be a :class:`RangeIndex`, when no index is specified. Previously the index would be a :class:`Index` with dtype ``object`` if the new DataFrame/Series has length 0 (:issue:`49572`) - :meth:`DataFrame.values`, :meth:`DataFrame.to_numpy`, :meth:`DataFrame.xs`, :meth:`DataFrame.reindex`, :meth:`DataFrame.fillna`, and :meth:`DataFrame.replace` no longer silently consolidate the underlying arrays; do ``df = df.copy()`` to ensure consolidation (:issue:`49356`) +- Creating a new DataFrame using a full slice on both axes with :attr:`~DataFrame.loc` + or :attr:`~DataFrame.iloc` (thus, ``df.loc[:, :]`` or ``df.iloc[:, :]``) now returns a + new DataFrame (shallow copy) instead of the original DataFrame, consistent with other + methods to get a full slice (for example ``df.loc[:]`` or ``df[:]``) (:issue:`49469`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 929a1c4e30a5f..070ec7c7a2e4a 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -934,6 +934,11 @@ def _getitem_tuple_same_dim(self, tup: tuple): # be handled by the _getitem_lowerdim call above. 
assert retval.ndim == self.ndim + if retval is self.obj: + # if all axes were a null slice (`df.loc[:, :]`), ensure we still + # return a new object (https://github.com/pandas-dev/pandas/pull/49469) + retval = retval.copy(deep=False) + return retval @final diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py index b8028fd28f8f8..5ebbbcfb5a301 100644 --- a/pandas/tests/copy_view/test_indexing.py +++ b/pandas/tests/copy_view/test_indexing.py @@ -612,6 +612,62 @@ def test_subset_chained_single_block_row(using_copy_on_write, using_array_manage assert subset.iloc[0] == 0 +@pytest.mark.parametrize( + "method", + [ + lambda df: df[:], + lambda df: df.loc[:, :], + lambda df: df.loc[:], + lambda df: df.iloc[:, :], + lambda df: df.iloc[:], + ], + ids=["getitem", "loc", "loc-rows", "iloc", "iloc-rows"], +) +def test_null_slice(request, method, using_copy_on_write): + # Case: also all variants of indexing with a null slice (:) should return + # new objects to ensure we correctly use CoW for the results + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) + df_orig = df.copy() + + df2 = method(df) + + # we always return new objects (shallow copy), regardless of CoW or not + assert df2 is not df + + # and those trigger CoW when mutated + df2.iloc[0, 0] = 0 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + +@pytest.mark.parametrize( + "method", + [ + lambda s: s[:], + lambda s: s.loc[:], + lambda s: s.iloc[:], + ], + ids=["getitem", "loc", "iloc"], +) +def test_null_slice_series(request, method, using_copy_on_write): + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + + s2 = method(s) + + # we always return new objects, regardless of CoW or not + assert s2 is not s + + # and those trigger CoW when mutated + s2.iloc[0] = 0 + if using_copy_on_write: + tm.assert_series_equal(s, s_orig) + else: + assert s.iloc[0] == 0 + + # TODO add more tests modifying the 
parent diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 11cbcfe231928..6bd0806a42a5a 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1097,6 +1097,7 @@ def test_identity_slice_returns_new_object( sliced_df = original_df.loc[:] assert sliced_df is not original_df assert original_df[:] is not original_df + assert original_df.loc[:, :] is not original_df # should be a shallow copy assert np.shares_memory(original_df["a"]._values, sliced_df["a"]._values) @@ -1110,7 +1111,6 @@ def test_identity_slice_returns_new_object( assert (sliced_df["a"] == 4).all() # These should not return copies - assert original_df is original_df.loc[:, :] df = DataFrame(np.random.randn(10, 4)) assert df[0] is df.loc[:, 0]
Related to https://github.com/pandas-dev/pandas/pull/49450 (also ensuring we always return new objects from indexing operations, but for a different case). Currently, there is the specific corner case of `df.(i)loc[:, :]` that just returns `df` (this doesn't happen for `df.(i)loc[:]` or `df[:]`, where only a single dimension gets indexed) Given the current inconsistency with `df.loc[:, :]` vs `df.loc[:]`, we could also consider changing this for 2.0 for the default behaviour as well, and not just for when CoW is enabled? - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. xref https://github.com/pandas-dev/pandas/issues/48998
https://api.github.com/repos/pandas-dev/pandas/pulls/49469
2022-11-02T09:50:24Z
2022-12-09T12:11:36Z
2022-12-09T12:11:36Z
2022-12-09T12:11:40Z
API / CoW: detect and raise error for chained assignment under Copy-on-Write
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index d3ad2710a0efa..6726139ed5fa4 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -73,7 +73,6 @@ jobs: env_file: actions-pypy-38.yaml pattern: "not slow and not network and not single_cpu" test_args: "--max-worker-restart 0" - error_on_warnings: "0" - name: "Numpy Dev" env_file: actions-310-numpydev.yaml pattern: "not slow and not network and not single_cpu" diff --git a/doc/source/reference/testing.rst b/doc/source/reference/testing.rst index 07624e87d82e0..edfafee430d1d 100644 --- a/doc/source/reference/testing.rst +++ b/doc/source/reference/testing.rst @@ -28,6 +28,7 @@ Exceptions and warnings errors.AccessorRegistrationWarning errors.AttributeConflictWarning errors.CategoricalConversionWarning + errors.ChainedAssignmentError errors.ClosedFileError errors.CSSWarning errors.DatabaseError diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 605f1d4b26e13..513d9494f23cc 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -125,6 +125,14 @@ Copy-on-Write improvements a modification to the data happens) when constructing a Series from an existing Series with the default of ``copy=False`` (:issue:`50471`) +- Trying to set values using chained assignment (for example, ``df["a"][1:3] = 0``) + will now always raise an exception when Copy-on-Write is enabled. In this mode, + chained assignment can never work because we are always setting into a temporary + object that is the result of an indexing operation (getitem), which under + Copy-on-Write always behaves as a copy. Thus, assigning through a chain + can never update the original Series or DataFrame. Therefore, an informative + error is raised to the user instead of silently doing nothing (:issue:`49467`) + Copy-on-Write can be enabled through .. 
code-block:: python diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py index 6a7a1c7126cd3..eb25566e7983e 100644 --- a/pandas/_testing/__init__.py +++ b/pandas/_testing/__init__.py @@ -103,6 +103,7 @@ decompress_file, ensure_clean, ensure_safe_environment_variables, + raises_chained_assignment_error, set_timezone, use_numexpr, with_csv_dialect, @@ -1125,6 +1126,7 @@ def shares_memory(left, right) -> bool: "rands", "reset_display_options", "RNGContext", + "raises_chained_assignment_error", "round_trip_localpath", "round_trip_pathlib", "round_trip_pickle", diff --git a/pandas/_testing/contexts.py b/pandas/_testing/contexts.py index e5f716c62eca7..d0de085788782 100644 --- a/pandas/_testing/contexts.py +++ b/pandas/_testing/contexts.py @@ -14,6 +14,9 @@ import numpy as np +from pandas.compat import PYPY +from pandas.errors import ChainedAssignmentError + from pandas import set_option from pandas.io.common import get_handle @@ -227,3 +230,21 @@ def __exit__( ) -> None: np.random.set_state(self.start_state) + + +def raises_chained_assignment_error(): + + if PYPY: + from contextlib import nullcontext + + return nullcontext() + else: + import pytest + + return pytest.raises( + ChainedAssignmentError, + match=( + "A value is trying to be set on a copy of a DataFrame or Series " + "through chained assignment" + ), + ) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9efc07628cccd..685ed9f972a32 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -16,6 +16,7 @@ import functools from io import StringIO import itertools +import sys from textwrap import dedent from typing import ( TYPE_CHECKING, @@ -91,12 +92,17 @@ WriteBuffer, npt, ) +from pandas.compat import PYPY from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import ( function as nv, np_percentile_argname, ) -from pandas.errors import InvalidIndexError +from pandas.errors import ( + ChainedAssignmentError, + InvalidIndexError, + 
_chained_assignment_msg, +) from pandas.util._decorators import ( Appender, Substitution, @@ -3862,6 +3868,10 @@ def isetitem(self, loc, value) -> None: self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= 3: + raise ChainedAssignmentError(_chained_assignment_msg) + key = com.apply_if_callable(key, self) # see if we can slice the rows diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index a7b19e3180fff..26b5a4077b0ff 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1,6 +1,7 @@ from __future__ import annotations from contextlib import suppress +import sys from typing import ( TYPE_CHECKING, Hashable, @@ -12,17 +13,22 @@ import numpy as np +from pandas._config import using_copy_on_write + from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas._typing import ( Axis, AxisInt, ) +from pandas.compat import PYPY from pandas.errors import ( AbstractMethodError, + ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError, + _chained_assignment_msg, ) from pandas.util._decorators import doc @@ -830,6 +836,10 @@ def _ensure_listlike_indexer(self, key, axis=None, value=None) -> None: @final def __setitem__(self, key, value) -> None: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self.obj) <= 2: + raise ChainedAssignmentError(_chained_assignment_msg) + check_dict_or_set_indexers(key) if isinstance(key, tuple): key = tuple(list(x) if is_iterator(x) else x for x in key) diff --git a/pandas/core/series.py b/pandas/core/series.py index c6ba217042353..106cf166acd14 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3,6 +3,7 @@ """ from __future__ import annotations +import sys from textwrap import dedent from typing import ( IO, @@ -67,8 +68,13 @@ WriteBuffer, npt, ) +from pandas.compat import PYPY from pandas.compat.numpy import function as 
nv -from pandas.errors import InvalidIndexError +from pandas.errors import ( + ChainedAssignmentError, + InvalidIndexError, + _chained_assignment_msg, +) from pandas.util._decorators import ( Appender, Substitution, @@ -1074,6 +1080,10 @@ def _get_value(self, label, takeable: bool = False): return self.iloc[loc] def __setitem__(self, key, value) -> None: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= 3: + raise ChainedAssignmentError(_chained_assignment_msg) + check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index 89ac1c10254cb..5a1915956616c 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -320,6 +320,42 @@ class SettingWithCopyWarning(Warning): """ +class ChainedAssignmentError(ValueError): + """ + Exception raised when trying to set using chained assignment. + + When the ``mode.copy_on_write`` option is enabled, chained assignment can + never work. In such a situation, we are always setting into a temporary + object that is the result of an indexing operation (getitem), which under + Copy-on-Write always behaves as a copy. Thus, assigning through a chain + can never update the original Series or DataFrame. + + For more information on view vs. copy, + see :ref:`the user guide<indexing.view_versus_copy>`. + + Examples + -------- + >>> pd.options.mode.copy_on_write = True + >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A']) + >>> df["A"][0:3] = 10 # doctest: +SKIP + ... # ChainedAssignmentError: ... 
+ """ + + +_chained_assignment_msg = ( + "A value is trying to be set on a copy of a DataFrame or Series " + "through chained assignment.\n" + "When using the Copy-on-Write mode, such chained assignment never works " + "to update the original DataFrame or Series, because the intermediate " + "object on which we are setting values always behaves as a copy.\n\n" + "Try using '.loc[row_indexer, col_indexer] = value' instead, to perform " + "the assignment in a single step.\n\n" + "See the caveats in the documentation: " + "https://pandas.pydata.org/pandas-docs/stable/user_guide/" + "indexing.html#returning-a-view-versus-a-copy" +) + + class NumExprClobberingError(NameError): """ Exception raised when trying to use a built-in numexpr name as a variable name. diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index d8d626b3af84a..62f05cb523b1b 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -1245,12 +1245,15 @@ def test_setitem_column_update_inplace(self, using_copy_on_write): df = DataFrame({col: np.zeros(len(labels)) for col in labels}, index=labels) values = df._mgr.blocks[0].values - for label in df.columns: - df[label][label] = 1 - if not using_copy_on_write: + for label in df.columns: + df[label][label] = 1 + # diagonal values all updated assert np.all(values[np.arange(10), np.arange(10)] == 1) else: + with tm.raises_chained_assignment_error(): + for label in df.columns: + df[label][label] = 1 # original dataframe not updated assert np.all(values[np.arange(10), np.arange(10)] == 0) diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py index b7549771c7cc5..7e0623f7beaa9 100644 --- a/pandas/tests/frame/indexing/test_xs.py +++ b/pandas/tests/frame/indexing/test_xs.py @@ -124,7 +124,8 @@ def test_xs_view(self, using_array_manager, using_copy_on_write): df_orig = dm.copy() if using_copy_on_write: - dm.xs(2)[:] = 20 + 
with tm.raises_chained_assignment_error(): + dm.xs(2)[:] = 20 tm.assert_frame_equal(dm, df_orig) elif using_array_manager: # INFO(ArrayManager) with ArrayManager getting a row as a view is diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 5fca8d0568a67..04f4766e49227 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -340,7 +340,11 @@ def test_stale_cached_series_bug_473(self, using_copy_on_write): ) repr(Y) Y["e"] = Y["e"].astype("object") - Y["g"]["c"] = np.NaN + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + Y["g"]["c"] = np.NaN + else: + Y["g"]["c"] = np.NaN repr(Y) result = Y.sum() # noqa exp = Y["g"].sum() # noqa diff --git a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py index 2efb288a73f8d..932457eebcd8e 100644 --- a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py +++ b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py @@ -50,11 +50,13 @@ def test_cache_updating(using_copy_on_write): # setting via chained assignment # but actually works, since everything is a view - df.loc[0]["z"].iloc[0] = 1.0 - result = df.loc[(0, 0), "z"] if using_copy_on_write: - assert result == df_original.loc[0, "z"] + with tm.raises_chained_assignment_error(): + df.loc[0]["z"].iloc[0] = 1.0 + assert df.loc[(0, 0), "z"] == df_original.loc[0, "z"] else: + df.loc[0]["z"].iloc[0] = 1.0 + result = df.loc[(0, 0), "z"] assert result == 1 # correct setting diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py index 0c63326118ac3..d34daaf640305 100644 --- a/pandas/tests/indexing/multiindex/test_partial.py +++ b/pandas/tests/indexing/multiindex/test_partial.py @@ -128,9 +128,13 @@ def test_partial_set( exp.iloc[65:85] = 0 tm.assert_frame_equal(df, exp) - df["A"].loc[2000, 4] = 1 - if not 
using_copy_on_write: - exp["A"].loc[2000, 4].values[:] = 1 + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["A"].loc[2000, 4] = 1 + df.loc[(2000, 4), "A"] = 1 + else: + df["A"].loc[2000, 4] = 1 + exp.iloc[65:85, 0] = 1 tm.assert_frame_equal(df, exp) df.loc[2000] = 5 @@ -138,10 +142,12 @@ def test_partial_set( tm.assert_frame_equal(df, exp) # this works...for now - df["A"].iloc[14] = 5 if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["A"].iloc[14] = 5 df["A"].iloc[14] == exp["A"].iloc[14] else: + df["A"].iloc[14] = 5 assert df["A"].iloc[14] == 5 @pytest.mark.parametrize("dtype", [int, float]) diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py index ac10a6d82dc89..3ca057b80e578 100644 --- a/pandas/tests/indexing/multiindex/test_setitem.py +++ b/pandas/tests/indexing/multiindex/test_setitem.py @@ -501,8 +501,8 @@ def test_frame_setitem_copy_raises( # will raise/warn as its chained assignment df = multiindex_dataframe_random_data.T if using_copy_on_write: - # TODO(CoW) it would be nice if this could still warn/raise - df["foo"]["one"] = 2 + with tm.raises_chained_assignment_error(): + df["foo"]["one"] = 2 else: msg = "A value is trying to be set on a copy of a slice from a DataFrame" with pytest.raises(SettingWithCopyError, match=msg): @@ -516,7 +516,8 @@ def test_frame_setitem_copy_no_write( expected = frame df = frame.copy() if using_copy_on_write: - df["foo"]["one"] = 2 + with tm.raises_chained_assignment_error(): + df["foo"]["one"] = 2 else: msg = "A value is trying to be set on a copy of a slice from a DataFrame" with pytest.raises(SettingWithCopyError, match=msg): diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index 2656cc77c2a9d..5e7abeb86705b 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -50,7 +50,11 @@ 
def test_slice_consolidate_invalidate_item_cache(self, using_copy_on_write): repr(df) # Assignment to wrong series - df["bb"].iloc[0] = 0.17 + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["bb"].iloc[0] = 0.17 + else: + df["bb"].iloc[0] = 0.17 df._clear_item_cache() if not using_copy_on_write: tm.assert_almost_equal(df["bb"][0], 0.17) @@ -99,7 +103,11 @@ def test_setitem_cache_updating_slices(self, using_copy_on_write): out_original = out.copy() for ix, row in df.iterrows(): v = out[row["C"]][six:eix] + row["D"] - out[row["C"]][six:eix] = v + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + out[row["C"]][six:eix] = v + else: + out[row["C"]][six:eix] = v if not using_copy_on_write: tm.assert_frame_equal(out, expected) @@ -143,43 +151,55 @@ def test_setitem_chained_setfault(self, using_copy_on_write): df = DataFrame({"response": np.array(data)}) mask = df.response == "timeout" - df.response[mask] = "none" if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.response[mask] = "none" tm.assert_frame_equal(df, DataFrame({"response": data})) else: + df.response[mask] = "none" tm.assert_frame_equal(df, DataFrame({"response": mdata})) recarray = np.rec.fromarrays([data], names=["response"]) df = DataFrame(recarray) mask = df.response == "timeout" - df.response[mask] = "none" if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.response[mask] = "none" tm.assert_frame_equal(df, DataFrame({"response": data})) else: + df.response[mask] = "none" tm.assert_frame_equal(df, DataFrame({"response": mdata})) df = DataFrame({"response": data, "response1": data}) df_original = df.copy() mask = df.response == "timeout" - df.response[mask] = "none" if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.response[mask] = "none" tm.assert_frame_equal(df, df_original) else: + df.response[mask] = "none" tm.assert_frame_equal(df, DataFrame({"response": mdata, "response1": data})) # GH 
6056 expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]}) df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])}) - df["A"].iloc[0] = np.nan - result = df.head() if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["A"].iloc[0] = np.nan expected = DataFrame({"A": ["foo", "bar", "bah", "foo", "bar"]}) else: + df["A"].iloc[0] = np.nan expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]}) + result = df.head() tm.assert_frame_equal(result, expected) df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])}) - df.A.iloc[0] = np.nan + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.A.iloc[0] = np.nan + else: + df.A.iloc[0] = np.nan result = df.head() tm.assert_frame_equal(result, expected) @@ -195,11 +215,15 @@ def test_detect_chained_assignment(self, using_copy_on_write): df_original = df.copy() assert df._is_copy is None - df["A"][0] = -5 - df["A"][1] = -6 if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["A"][0] = -5 + with tm.raises_chained_assignment_error(): + df["A"][1] = -6 tm.assert_frame_equal(df, df_original) else: + df["A"][0] = -5 + df["A"][1] = -6 tm.assert_frame_equal(df, expected) @pytest.mark.arm_slow @@ -218,8 +242,10 @@ def test_detect_chained_assignment_raises( assert df._is_copy is None if using_copy_on_write: - df["A"][0] = -5 - df["A"][1] = -6 + with tm.raises_chained_assignment_error(): + df["A"][0] = -5 + with tm.raises_chained_assignment_error(): + df["A"][1] = -6 tm.assert_frame_equal(df, df_original) elif not using_array_manager: with pytest.raises(SettingWithCopyError, match=msg): @@ -250,8 +276,8 @@ def test_detect_chained_assignment_fails(self, using_copy_on_write): ) if using_copy_on_write: - # TODO(CoW) can we still warn here? 
- df.loc[0]["A"] = -5 + with tm.raises_chained_assignment_error(): + df.loc[0]["A"] = -5 else: with pytest.raises(SettingWithCopyError, match=msg): df.loc[0]["A"] = -5 @@ -269,9 +295,9 @@ def test_detect_chained_assignment_doc_example(self, using_copy_on_write): assert df._is_copy is None if using_copy_on_write: - # TODO(CoW) can we still warn here? indexer = df.a.str.startswith("o") - df[indexer]["c"] = 42 + with tm.raises_chained_assignment_error(): + df[indexer]["c"] = 42 else: with pytest.raises(SettingWithCopyError, match=msg): indexer = df.a.str.startswith("o") @@ -291,8 +317,8 @@ def test_detect_chained_assignment_object_dtype( df.loc[0]["A"] = 111 if using_copy_on_write: - # TODO(CoW) can we still warn here? - df["A"][0] = 111 + with tm.raises_chained_assignment_error(): + df["A"][0] = 111 tm.assert_frame_equal(df, df_original) elif not using_array_manager: with pytest.raises(SettingWithCopyError, match=msg): @@ -420,8 +446,8 @@ def test_detect_chained_assignment_undefined_column(self, using_copy_on_write): df_original = df.copy() if using_copy_on_write: - # TODO(CoW) can we still warn here? 
- df.iloc[0:5]["group"] = "a" + with tm.raises_chained_assignment_error(): + df.iloc[0:5]["group"] = "a" tm.assert_frame_equal(df, df_original) else: with pytest.raises(SettingWithCopyError, match=msg): @@ -444,9 +470,12 @@ def test_detect_chained_assignment_changing_dtype( df_original = df.copy() if using_copy_on_write: - df.loc[2]["D"] = "foo" - df.loc[2]["C"] = "foo" - df["C"][2] = "foo" + with tm.raises_chained_assignment_error(): + df.loc[2]["D"] = "foo" + with tm.raises_chained_assignment_error(): + df.loc[2]["C"] = "foo" + with tm.raises_chained_assignment_error(): + df["C"][2] = "foo" tm.assert_frame_equal(df, df_original) if not using_copy_on_write: @@ -475,7 +504,8 @@ def test_setting_with_copy_bug(self, using_copy_on_write): mask = pd.isna(df.c) if using_copy_on_write: - df[["c"]][mask] = df[["b"]][mask] + with tm.raises_chained_assignment_error(): + df[["c"]][mask] = df[["b"]][mask] tm.assert_frame_equal(df, df_original) else: with pytest.raises(SettingWithCopyError, match=msg): @@ -493,7 +523,8 @@ def test_setting_with_copy_bug_no_warning(self): def test_detect_chained_assignment_warnings_errors(self, using_copy_on_write): df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]}) if using_copy_on_write: - df.loc[0]["A"] = 111 + with tm.raises_chained_assignment_error(): + df.loc[0]["A"] = 111 return with option_context("chained_assignment", "warn"): @@ -559,6 +590,7 @@ def test_cache_updating2(self): index=range(5), ) df["f"] = 0 + # TODO(CoW) protect underlying values of being written to? 
df.f.values[3] = 1 df.f.values[3] = 2 @@ -580,20 +612,33 @@ def test_iloc_setitem_chained_assignment(self, using_copy_on_write): ck = [True] * len(df) - df["bb"].iloc[0] = 0.13 + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["bb"].iloc[0] = 0.13 + else: + df["bb"].iloc[0] = 0.13 # GH#3970 this lookup used to break the chained setting to 0.15 df.iloc[ck] - df["bb"].iloc[0] = 0.15 + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["bb"].iloc[0] = 0.15 + else: + df["bb"].iloc[0] = 0.15 + if not using_copy_on_write: assert df["bb"].iloc[0] == 0.15 else: assert df["bb"].iloc[0] == 2.2 - def test_getitem_loc_assignment_slice_state(self): + def test_getitem_loc_assignment_slice_state(self, using_copy_on_write): # GH 13569 df = DataFrame({"a": [10, 20, 30]}) - df["a"].loc[4] = 40 + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].loc[4] = 40 + else: + df["a"].loc[4] = 40 tm.assert_frame_equal(df, DataFrame({"a": [10, 20, 30]})) tm.assert_series_equal(df["a"], Series([10, 20, 30], name="a")) diff --git a/pandas/tests/io/test_spss.py b/pandas/tests/io/test_spss.py index a4894ff66ab9f..d507ab07b7cd1 100644 --- a/pandas/tests/io/test_spss.py +++ b/pandas/tests/io/test_spss.py @@ -3,12 +3,15 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd import pandas._testing as tm pyreadstat = pytest.importorskip("pyreadstat") +@td.skip_copy_on_write_not_yet_implemented @pytest.mark.parametrize("path_klass", [lambda p: p, Path]) def test_spss_labelled_num(path_klass, datapath): # test file from the Haven project (https://haven.tidyverse.org/) @@ -24,6 +27,7 @@ def test_spss_labelled_num(path_klass, datapath): tm.assert_frame_equal(df, expected) +@td.skip_copy_on_write_not_yet_implemented def test_spss_labelled_num_na(datapath): # test file from the Haven project (https://haven.tidyverse.org/) fname = datapath("io", "data", "spss", "labelled-num-na.sav") @@ 
-38,6 +42,7 @@ def test_spss_labelled_num_na(datapath): tm.assert_frame_equal(df, expected) +@td.skip_copy_on_write_not_yet_implemented def test_spss_labelled_str(datapath): # test file from the Haven project (https://haven.tidyverse.org/) fname = datapath("io", "data", "spss", "labelled-str.sav") @@ -52,6 +57,7 @@ def test_spss_labelled_str(datapath): tm.assert_frame_equal(df, expected) +@td.skip_copy_on_write_not_yet_implemented def test_spss_umlauts(datapath): # test file from the Haven project (https://haven.tidyverse.org/) fname = datapath("io", "data", "spss", "umlauts.sav") diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py index adb11b88cf667..1914bdae07e4b 100644 --- a/pandas/tests/series/accessors/test_dt_accessor.py +++ b/pandas/tests/series/accessors/test_dt_accessor.py @@ -287,8 +287,8 @@ def test_dt_accessor_not_writeable(self, using_copy_on_write): msg = "modifications to a property of a datetimelike.+not supported" with pd.option_context("chained_assignment", "raise"): if using_copy_on_write: - # TODO(CoW) it would be nice to keep a warning/error for this case - ser.dt.hour[0] = 5 + with tm.raises_chained_assignment_error(): + ser.dt.hour[0] = 5 else: with pytest.raises(SettingWithCopyError, match=msg): ser.dt.hour[0] = 5 diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 5bc55ee789fe6..de100dba8144d 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -294,3 +294,8 @@ def mark_array_manager_not_yet_implemented(request) -> None: get_option("mode.data_manager") == "array", reason="Test that relies on BlockManager internals or specific behaviour", ) + +skip_copy_on_write_not_yet_implemented = pytest.mark.xfail( + get_option("mode.copy_on_write"), + reason="Not yet implemented/adapted for Copy-on-Write mode", +) diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py index 
68a376956429b..8d4aecd596328 100755 --- a/scripts/validate_unwanted_patterns.py +++ b/scripts/validate_unwanted_patterns.py @@ -53,6 +53,7 @@ "__version__", # check np.__version__ in compat.numpy.function "_arrow_dtype_mapping", "_global_config", + "_chained_assignment_msg", }
xref https://github.com/pandas-dev/pandas/issues/48998 One of the consequences of the copy / view rules with the Copy-on-Write proposal is that direct chained assignement (i.e. `df[..][..] = ..`, without using any intermediate variable like `sub = df[..]; sub[..] = ..`) consistently _never_ works (so not depending on which type of indexing operation (eg column selection vs row mask) or on the order of the operations). Given that this will be one of the significant backwards incompatible aspects of the CoW change (and so we will also need to add warnings for this in advance (before using / switching to CoW); but this PR is focusing on the eventual behaviour with CoW enabled, adding such warnings is for another PR), I think it can be useful to keep raising an error for this even after the CoW behaviour would have become the default (in addition to warning about it in advance). And given that this consistently _never_ works, I think it is also fine to keep raising an error in the future for this, as there should never be a reason (in the future) to actually do this. This is somewhat similar to the `SettingWithCopyError` we already have (the error that can optionally be enabled, to get errors instead of warnings). But I decided to not reuse this error but create a new exception class, because it is different enough (it is raised (or not) in different situations; for example SettingWithCopyError will not raise for chained assignment in the cases it is know the work at the moment, and it can raise in non-chained cases (using an intermediate variable) while the new `ChainedAssignmentError` would solely focus on chained cases). This includes the changes from https://github.com/pandas-dev/pandas/pull/49450 as well (the first commit here), since that change was needed to have the refcounting work correctly. But it was a sufficiently stand-alone change, so I broke it off in its own PR. 
**How does this work?** I am relying on the refcount of the object on which `__setitem__` is being called. If you consider the two simple chained and non-chained cases: ```python df[col][mask] = .. # vs subset = df[col] subset[mask] = .. ``` In the first case, the temporary object (from `df[col]`) only lives in this chain, and doesn't have any references to it otherwise (and would also be cleaned up after the setitem operation), and does has a lower reference count compared to the second, non-chained case where the intermediate object (here called `subset`) is explicitly created by the user. In the second case we don't want an error (because this is valid code to update `subset`; because of triggering CoW it will just not update the parent `df`). From testing, the reference count in the first (chained) case seems to be 3, and if there is another reference to the object, it is always higher than 3, and for now this seems to be robustly so for our full test suite. One problem with this approach is that **it is CPython specific**. For example PyPy doesn't use refcounting, and so this PR won't work to raise the error on PyPy (I probably still need to add a platform check together with the refcount check, to avoid we raise an error about `sys.getrefcount` not being available on PyPy). For example the numpy `resize` method also uses refcounting for certain cases, and therefore requires the user to pass a keyword to disable this check on PyPy (https://github.com/numpy/numpy/pull/8050). I am not familiar enough with PyPy to know if there might be alternatives ways to check this when using PyPy. And it is certainly a downside of the current approach that it would not work consistently across Python implementations. But it is also mostly a user convenience to signal they are doing something that won't work, so my feeling is that in this case this difference can be OK (it is not that correct code would behave differently. 
The refcount is not used to know if CoW needs to happen or not (which affects actual output), but only to know if it would be useful to signal the user about code that has no effect anyway). - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49467
2022-11-02T09:10:10Z
2023-01-24T15:10:26Z
2023-01-24T15:10:26Z
2023-01-24T15:28:53Z
Fix memory leak with ujson module
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index b97d0f5232f1e..703d6ede42583 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -565,7 +565,7 @@ I/O - Improved error message in :func:`read_excel` by including the offending sheet name when an exception is raised while reading a file (:issue:`48706`) - Bug when a pickling a subset PyArrow-backed data that would serialize the entire data instead of the subset (:issue:`42600`) - Bug in :func:`read_csv` for a single-line csv with fewer columns than ``names`` raised :class:`.errors.ParserError` with ``engine="c"`` (:issue:`47566`) -- +- Fixed memory leak which stemmed from the initialization of the internal JSON module (:issue:`49222`) Period ^^^^^^ diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 260f1ffb6165f..591dff72e3872 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -50,19 +50,18 @@ Numeric decoder derived from TCL library #include "date_conversions.h" #include "datetime.h" -static PyTypeObject *type_decimal; -static PyTypeObject *cls_dataframe; -static PyTypeObject *cls_series; -static PyTypeObject *cls_index; -static PyTypeObject *cls_nat; -static PyTypeObject *cls_na; -PyObject *cls_timedelta; - npy_int64 get_nat(void) { return NPY_MIN_INT64; } typedef char *(*PFN_PyTypeToUTF8)(JSOBJ obj, JSONTypeContext *ti, size_t *_outLen); +int object_is_decimal_type(PyObject *obj); +int object_is_dataframe_type(PyObject *obj); +int object_is_series_type(PyObject *obj); +int object_is_index_type(PyObject *obj); +int object_is_nat_type(PyObject *obj); +int object_is_na_type(PyObject *obj); + typedef struct __NpyArrContext { PyObject *array; char *dataptr; @@ -146,44 +145,6 @@ enum PANDAS_FORMAT { SPLIT, RECORDS, INDEX, COLUMNS, VALUES }; int PdBlock_iterNext(JSOBJ, JSONTypeContext *); -void *initObjToJSON(void) { - PyObject *mod_pandas; - 
PyObject *mod_nattype; - PyObject *mod_natype; - PyObject *mod_decimal = PyImport_ImportModule("decimal"); - type_decimal = - (PyTypeObject *)PyObject_GetAttrString(mod_decimal, "Decimal"); - Py_DECREF(mod_decimal); - - PyDateTime_IMPORT; - - mod_pandas = PyImport_ImportModule("pandas"); - if (mod_pandas) { - cls_dataframe = - (PyTypeObject *)PyObject_GetAttrString(mod_pandas, "DataFrame"); - cls_index = (PyTypeObject *)PyObject_GetAttrString(mod_pandas, "Index"); - cls_series = - (PyTypeObject *)PyObject_GetAttrString(mod_pandas, "Series"); - Py_DECREF(mod_pandas); - } - - mod_nattype = PyImport_ImportModule("pandas._libs.tslibs.nattype"); - if (mod_nattype) { - cls_nat = - (PyTypeObject *)PyObject_GetAttrString(mod_nattype, "NaTType"); - Py_DECREF(mod_nattype); - } - - mod_natype = PyImport_ImportModule("pandas._libs.missing"); - if (mod_natype) { - cls_na = (PyTypeObject *)PyObject_GetAttrString(mod_natype, "NAType"); - Py_DECREF(mod_natype); - } - - // GH 31463 - return NULL; -} - static TypeContext *createTypeContext(void) { TypeContext *pc; @@ -216,8 +177,7 @@ static TypeContext *createTypeContext(void) { static PyObject *get_values(PyObject *obj) { PyObject *values = NULL; - if (PyObject_TypeCheck(obj, cls_index) || - PyObject_TypeCheck(obj, cls_series)) { + if (object_is_index_type(obj) || object_is_series_type(obj)) { // The special cases to worry about are dt64tz and category[dt64tz]. // In both cases we want the UTC-localized datetime64 ndarray, // without going through and object array of Timestamps. 
@@ -1510,12 +1470,12 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { pc->PyTypeToUTF8 = PyUnicodeToUTF8; tc->type = JT_UTF8; return; - } else if (PyObject_TypeCheck(obj, type_decimal)) { + } else if (object_is_decimal_type(obj)) { GET_TC(tc)->doubleValue = PyFloat_AsDouble(obj); tc->type = JT_DOUBLE; return; } else if (PyDateTime_Check(obj) || PyDate_Check(obj)) { - if (PyObject_TypeCheck(obj, cls_nat)) { + if (object_is_nat_type(obj)) { tc->type = JT_NULL; return; } @@ -1606,14 +1566,14 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { "%R (0d array) is not JSON serializable at the moment", obj); goto INVALID; - } else if (PyObject_TypeCheck(obj, cls_na)) { + } else if (object_is_na_type(obj)) { tc->type = JT_NULL; return; } ISITERABLE: - if (PyObject_TypeCheck(obj, cls_index)) { + if (object_is_index_type(obj)) { if (enc->outputFormat == SPLIT) { tc->type = JT_OBJECT; pc->iterBegin = Index_iterBegin; @@ -1637,7 +1597,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } return; - } else if (PyObject_TypeCheck(obj, cls_series)) { + } else if (object_is_series_type(obj)) { if (enc->outputFormat == SPLIT) { tc->type = JT_OBJECT; pc->iterBegin = Series_iterBegin; @@ -1701,7 +1661,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { pc->iterGetValue = NpyArr_iterGetValue; pc->iterGetName = NpyArr_iterGetName; return; - } else if (PyObject_TypeCheck(obj, cls_dataframe)) { + } else if (object_is_dataframe_type(obj)) { if (enc->blkCtxtPassthru) { pc->pdblock = enc->blkCtxtPassthru; tc->type = @@ -1969,6 +1929,11 @@ char *Object_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args, PyObject *kwargs) { + PyDateTime_IMPORT; + if (PyDateTimeAPI == NULL) { + return NULL; + } + static char *kwlist[] = {"obj", "ensure_ascii", "double_precision", diff --git a/pandas/_libs/src/ujson/python/ujson.c b/pandas/_libs/src/ujson/python/ujson.c index 
5d4a5693c0ff6..c12f88d2f9354 100644 --- a/pandas/_libs/src/ujson/python/ujson.c +++ b/pandas/_libs/src/ujson/python/ujson.c @@ -67,15 +67,385 @@ static PyMethodDef ujsonMethods[] = { {NULL, NULL, 0, NULL} /* Sentinel */ }; -static PyModuleDef moduledef = { - .m_base = PyModuleDef_HEAD_INIT, - .m_name = "_libjson", - .m_methods = ujsonMethods -}; +typedef struct { + PyObject *type_decimal; + PyObject *type_dataframe; + PyObject *type_series; + PyObject *type_index; + PyObject *type_nat; + PyObject *type_na; +} modulestate; + +#define modulestate(o) ((modulestate *)PyModule_GetState(o)) + +static int module_traverse(PyObject *m, visitproc visit, void *arg); +static int module_clear(PyObject *m); +static void module_free(void *module); + +static struct PyModuleDef moduledef = {.m_base = PyModuleDef_HEAD_INIT, + .m_name = "_libjson", + .m_methods = ujsonMethods, + .m_size = sizeof(modulestate), + .m_traverse = module_traverse, + .m_clear = module_clear, + .m_free = module_free}; + +#ifndef PYPY_VERSION +/* Used in objToJSON.c */ +int object_is_decimal_type(PyObject *obj) { + PyObject *module = PyState_FindModule(&moduledef); + if (module == NULL) + return 0; + modulestate *state = modulestate(module); + if (state == NULL) + return 0; + PyObject *type_decimal = state->type_decimal; + if (type_decimal == NULL) { + PyErr_Clear(); + return 0; + } + int result = PyObject_IsInstance(obj, type_decimal); + if (result == -1) { + PyErr_Clear(); + return 0; + } + return result; +} +int object_is_dataframe_type(PyObject *obj) { + PyObject *module = PyState_FindModule(&moduledef); + if (module == NULL) + return 0; + modulestate *state = modulestate(module); + if (state == NULL) + return 0; + PyObject *type_dataframe = state->type_dataframe; + if (type_dataframe == NULL) { + PyErr_Clear(); + return 0; + } + int result = PyObject_IsInstance(obj, type_dataframe); + if (result == -1) { + PyErr_Clear(); + return 0; + } + return result; +} + +int object_is_series_type(PyObject *obj) { + 
PyObject *module = PyState_FindModule(&moduledef); + if (module == NULL) + return 0; + modulestate *state = modulestate(module); + if (state == NULL) + return 0; + PyObject *type_series = state->type_series; + if (type_series == NULL) { + PyErr_Clear(); + return 0; + } + int result = PyObject_IsInstance(obj, type_series); + if (result == -1) { + PyErr_Clear(); + return 0; + } + return result; +} + +int object_is_index_type(PyObject *obj) { + PyObject *module = PyState_FindModule(&moduledef); + if (module == NULL) + return 0; + modulestate *state = modulestate(module); + if (state == NULL) + return 0; + PyObject *type_index = state->type_index; + if (type_index == NULL) { + PyErr_Clear(); + return 0; + } + int result = PyObject_IsInstance(obj, type_index); + if (result == -1) { + PyErr_Clear(); + return 0; + } + return result; +} + +int object_is_nat_type(PyObject *obj) { + PyObject *module = PyState_FindModule(&moduledef); + if (module == NULL) + return 0; + modulestate *state = modulestate(module); + if (state == NULL) + return 0; + PyObject *type_nat = state->type_nat; + if (type_nat == NULL) { + PyErr_Clear(); + return 0; + } + int result = PyObject_IsInstance(obj, type_nat); + if (result == -1) { + PyErr_Clear(); + return 0; + } + return result; +} + +int object_is_na_type(PyObject *obj) { + PyObject *module = PyState_FindModule(&moduledef); + if (module == NULL) + return 0; + modulestate *state = modulestate(module); + if (state == NULL) + return 0; + PyObject *type_na = state->type_na; + if (type_na == NULL) { + PyErr_Clear(); + return 0; + } + int result = PyObject_IsInstance(obj, type_na); + if (result == -1) { + PyErr_Clear(); + return 0; + } + return result; +} +#else + /* Used in objToJSON.c */ +int object_is_decimal_type(PyObject *obj) { + PyObject *module = PyImport_ImportModule("decimal"); + if (module == NULL) { + PyErr_Clear(); + return 0; + } + PyObject *type_decimal = PyObject_GetAttrString(module, "Decimal"); + if (type_decimal == NULL) { + 
Py_DECREF(module); + PyErr_Clear(); + return 0; + } + int result = PyObject_IsInstance(obj, type_decimal); + if (result == -1) { + Py_DECREF(module); + Py_DECREF(type_decimal); + PyErr_Clear(); + return 0; + } + return result; +} + +int object_is_dataframe_type(PyObject *obj) { + PyObject *module = PyImport_ImportModule("pandas"); + if (module == NULL) { + PyErr_Clear(); + return 0; + } + PyObject *type_dataframe = PyObject_GetAttrString(module, "DataFrame"); + if (type_dataframe == NULL) { + Py_DECREF(module); + PyErr_Clear(); + return 0; + } + int result = PyObject_IsInstance(obj, type_dataframe); + if (result == -1) { + Py_DECREF(module); + Py_DECREF(type_dataframe); + PyErr_Clear(); + return 0; + } + return result; +} + +int object_is_series_type(PyObject *obj) { + PyObject *module = PyImport_ImportModule("pandas"); + if (module == NULL) { + PyErr_Clear(); + return 0; + } + PyObject *type_series = PyObject_GetAttrString(module, "Series"); + if (type_series == NULL) { + Py_DECREF(module); + PyErr_Clear(); + return 0; + } + int result = PyObject_IsInstance(obj, type_series); + if (result == -1) { + Py_DECREF(module); + Py_DECREF(type_series); + PyErr_Clear(); + return 0; + } + return result; +} + +int object_is_index_type(PyObject *obj) { + PyObject *module = PyImport_ImportModule("pandas"); + if (module == NULL) { + PyErr_Clear(); + return 0; + } + PyObject *type_index = PyObject_GetAttrString(module, "Index"); + if (type_index == NULL) { + Py_DECREF(module); + PyErr_Clear(); + return 0; + } + int result = PyObject_IsInstance(obj, type_index); + if (result == -1) { + Py_DECREF(module); + Py_DECREF(type_index); + PyErr_Clear(); + return 0; + } + return result; +} + +int object_is_nat_type(PyObject *obj) { + PyObject *module = PyImport_ImportModule("pandas._libs.tslibs.nattype"); + if (module == NULL) { + PyErr_Clear(); + return 0; + } + PyObject *type_nat = PyObject_GetAttrString(module, "NaTType"); + if (type_nat == NULL) { + Py_DECREF(module); + PyErr_Clear(); 
+ return 0; + } + int result = PyObject_IsInstance(obj, type_nat); + if (result == -1) { + Py_DECREF(module); + Py_DECREF(type_nat); + PyErr_Clear(); + return 0; + } + return result; +} + +int object_is_na_type(PyObject *obj) { + PyObject *module = PyImport_ImportModule("pandas._libs.missing"); + if (module == NULL) { + PyErr_Clear(); + return 0; + } + PyObject *type_na = PyObject_GetAttrString(module, "NAType"); + if (type_na == NULL) { + Py_DECREF(module); + PyErr_Clear(); + return 0; + } + int result = PyObject_IsInstance(obj, type_na); + if (result == -1) { + Py_DECREF(module); + Py_DECREF(type_na); + PyErr_Clear(); + return 0; + } + return result; +} + +#endif + +static int module_traverse(PyObject *m, visitproc visit, void *arg) { + Py_VISIT(modulestate(m)->type_decimal); + Py_VISIT(modulestate(m)->type_dataframe); + Py_VISIT(modulestate(m)->type_series); + Py_VISIT(modulestate(m)->type_index); + Py_VISIT(modulestate(m)->type_nat); + Py_VISIT(modulestate(m)->type_na); + return 0; +} + +static int module_clear(PyObject *m) { + Py_CLEAR(modulestate(m)->type_decimal); + Py_CLEAR(modulestate(m)->type_dataframe); + Py_CLEAR(modulestate(m)->type_series); + Py_CLEAR(modulestate(m)->type_index); + Py_CLEAR(modulestate(m)->type_nat); + Py_CLEAR(modulestate(m)->type_na); + return 0; +} + +static void module_free(void *module) { module_clear((PyObject *)module); } PyMODINIT_FUNC PyInit_json(void) { - import_array() - initObjToJSON(); // TODO(username): clean up, maybe via tp_free? - return PyModuleDef_Init(&moduledef); + import_array() + PyObject *module; + +#ifndef PYPY_VERSION + // This function is not supported in PyPy. 
+ if ((module = PyState_FindModule(&moduledef)) != NULL) { + Py_INCREF(module); + return module; + } +#endif + + module = PyModule_Create(&moduledef); + if (module == NULL) { + return NULL; + } + +#ifndef PYPY_VERSION + PyObject *mod_decimal = PyImport_ImportModule("decimal"); + if (mod_decimal) { + PyObject *type_decimal = PyObject_GetAttrString(mod_decimal, "Decimal"); + assert(type_decimal != NULL); + modulestate(module)->type_decimal = type_decimal; + Py_DECREF(mod_decimal); + } + + PyObject *mod_pandas = PyImport_ImportModule("pandas"); + if (mod_pandas) { + PyObject *type_dataframe = + PyObject_GetAttrString(mod_pandas, "DataFrame"); + assert(type_dataframe != NULL); + modulestate(module)->type_dataframe = type_dataframe; + + PyObject *type_series = PyObject_GetAttrString(mod_pandas, "Series"); + assert(type_series != NULL); + modulestate(module)->type_series = type_series; + + PyObject *type_index = PyObject_GetAttrString(mod_pandas, "Index"); + assert(type_index != NULL); + modulestate(module)->type_index = type_index; + + Py_DECREF(mod_pandas); + } + + PyObject *mod_nattype = + PyImport_ImportModule("pandas._libs.tslibs.nattype"); + if (mod_nattype) { + PyObject *type_nat = PyObject_GetAttrString(mod_nattype, "NaTType"); + assert(type_nat != NULL); + modulestate(module)->type_nat = type_nat; + + Py_DECREF(mod_nattype); + } + + PyObject *mod_natype = PyImport_ImportModule("pandas._libs.missing"); + if (mod_natype) { + PyObject *type_na = PyObject_GetAttrString(mod_natype, "NAType"); + assert(type_na != NULL); + modulestate(module)->type_na = type_na; + + Py_DECREF(mod_natype); + } else { + PyErr_Clear(); + } +#endif + + /* Not vendored for now + JSONDecodeError = PyErr_NewException("ujson.JSONDecodeError", + PyExc_ValueError, NULL); Py_XINCREF(JSONDecodeError); if + (PyModule_AddObject(module, "JSONDecodeError", JSONDecodeError) < 0) + { + Py_XDECREF(JSONDecodeError); + Py_CLEAR(JSONDecodeError); + Py_DECREF(module); + return NULL; + } + */ + + return 
module; }
- [X] closes #49222 (Replace xxxx with the GitHub issue number) This is mostly vendored from ujson upstream https://github.com/ultrajson/ultrajson/blob/main/python/ujson.c Using a static PyObject the way we did before is strongly discouraged. This is more verbose but should be more correct, although interestingly pushes us back to the legacy module initialization instead of PEP 489 multi phase initialization
https://api.github.com/repos/pandas-dev/pandas/pulls/49466
2022-11-02T04:05:12Z
2022-11-07T20:53:05Z
2022-11-07T20:53:05Z
2022-12-24T20:43:59Z
STYLE: fix pylint no-else-continue warnings
diff --git a/doc/source/conf.py b/doc/source/conf.py index 39acc28451f54..af93a979789c1 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -105,7 +105,7 @@ if fname == "index.rst" and os.path.abspath(dirname) == source_path: continue - elif pattern == "-api" and reldir.startswith("reference"): + if pattern == "-api" and reldir.startswith("reference"): exclude_patterns.append(fname) elif ( pattern == "whatsnew" diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 3b8380a88bb8b..de93289349cf9 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3069,10 +3069,10 @@ def maybe_mi_droplevels(indexer, levels): ): # everything continue - else: - # e.g. test_xs_IndexSlice_argument_not_implemented - k_index = np.zeros(len(self), dtype=bool) - k_index[loc_level] = True + + # e.g. test_xs_IndexSlice_argument_not_implemented + k_index = np.zeros(len(self), dtype=bool) + k_index[loc_level] = True else: k_index = loc_level diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index f4332f2c7eb1b..7d9842f7e5341 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1290,7 +1290,7 @@ def _maybe_coerce_merge_keys(self) -> None: continue # check whether ints and floats - elif is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype): + if is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype): # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int with np.errstate(invalid="ignore"): # error: Argument 1 to "astype" of "ndarray" has incompatible @@ -1312,7 +1312,7 @@ def _maybe_coerce_merge_keys(self) -> None: ) continue - elif is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype): + if is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype): # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int with np.errstate(invalid="ignore"): # error: Argument 1 to "astype" of "ndarray" has incompatible @@ -1335,7 +1335,7 @@ def _maybe_coerce_merge_keys(self) -> None: 
continue # let's infer and see if we are ok - elif lib.infer_dtype(lk, skipna=False) == lib.infer_dtype( + if lib.infer_dtype(lk, skipna=False) == lib.infer_dtype( rk, skipna=False ): continue diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index 035c5d88a7f4b..06e5702896f2f 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -399,11 +399,11 @@ def _translate_header(self, sparsify_cols: bool, max_cols: int): for r, hide in enumerate(self.hide_columns_): if hide or not clabels: continue - else: - header_row = self._generate_col_header_row( - (r, clabels), max_cols, col_lengths - ) - head.append(header_row) + + header_row = self._generate_col_header_row( + (r, clabels), max_cols, col_lengths + ) + head.append(header_row) # 2) index names if ( diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index 03afdcda35021..7ed7ce18e355b 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -112,9 +112,9 @@ def nested_to_record( v = new_d.pop(k) new_d[newkey] = v continue - else: - v = new_d.pop(k) - new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level)) + + v = new_d.pop(k) + new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level)) new_ds.append(new_d) if singleton: diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index af7ebc5e9555c..8997fc83eb5cf 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -661,8 +661,8 @@ def test_read_table_same_signature_as_read_csv(all_parsers): assert table_param.annotation == csv_param.annotation assert table_param.kind == csv_param.kind continue - else: - assert table_param == csv_param + + assert table_param == csv_param def test_read_table_equivalency_to_read_csv(all_parsers): diff --git a/pyproject.toml b/pyproject.toml index e936ee6b55b1c..761f3c687d08d 
100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -113,7 +113,6 @@ disable = [ "cyclic-import", "duplicate-code", "inconsistent-return-statements", - "no-else-continue", "no-else-raise", "no-else-return", "redefined-argument-from-local",
Related to https://github.com/pandas-dev/pandas/issues/48855 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49464
2022-11-02T02:39:47Z
2022-11-02T18:06:34Z
2022-11-02T18:06:34Z
2022-11-02T18:13:28Z
DEPR: Disallow kwargs in ExcelWriter
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 72f08ec90f5e8..c9ad86f44b0c4 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -275,6 +275,7 @@ Removal of prior version deprecations/changes - Removed ``pandas.SparseSeries`` and ``pandas.SparseDataFrame``, including pickle support. (:issue:`30642`) - Enforced disallowing passing an integer ``fill_value`` to :meth:`DataFrame.shift` and :meth:`Series.shift`` with datetime64, timedelta64, or period dtypes (:issue:`32591`) - Enforced disallowing a string column label into ``times`` in :meth:`DataFrame.ewm` (:issue:`43265`) +- Enforced disallowing the use of ``**kwargs`` in :class:`.ExcelWriter`; use the keyword argument ``engine_kwargs`` instead (:issue:`40430`) - Enforced disallowing a tuple of column labels into :meth:`.DataFrameGroupBy.__getitem__` (:issue:`30546`) - Enforced disallowing setting values with ``.loc`` using a positional slice. Use ``.loc`` with labels or ``.iloc`` with positions instead (:issue:`31840`) - Enforced disallowing ``dict`` or ``set`` objects in ``suffixes`` in :func:`merge` (:issue:`34810`) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 0b284fd4e9750..da55f43ec477f 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -947,12 +947,6 @@ class ExcelWriter(metaclass=abc.ABCMeta): * odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)`` .. versionadded:: 1.3.0 - **kwargs : dict, optional - Keyword arguments to be passed into the engine. - - .. deprecated:: 1.3.0 - - Use engine_kwargs instead. 
Notes ----- @@ -1093,17 +1087,7 @@ def __new__( storage_options: StorageOptions = None, if_sheet_exists: Literal["error", "new", "replace", "overlay"] | None = None, engine_kwargs: dict | None = None, - **kwargs, ) -> ExcelWriter: - if kwargs: - if engine_kwargs is not None: - raise ValueError("Cannot use both engine_kwargs and **kwargs") - warnings.warn( - "Use of **kwargs is deprecated, use engine_kwargs instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - # only switch class if generic(ExcelWriter) if cls is ExcelWriter: if engine is None or (isinstance(engine, str) and engine == "auto"): @@ -1235,7 +1219,6 @@ def __init__( storage_options: StorageOptions = None, if_sheet_exists: str | None = None, engine_kwargs: dict[str, Any] | None = None, - **kwargs, ) -> None: # validate that this engine can handle the extension if isinstance(path, str): diff --git a/pandas/tests/io/excel/test_odswriter.py b/pandas/tests/io/excel/test_odswriter.py index e9dad0c7fedc9..21d31ec8a7fb5 100644 --- a/pandas/tests/io/excel/test_odswriter.py +++ b/pandas/tests/io/excel/test_odswriter.py @@ -19,25 +19,6 @@ def test_write_append_mode_raises(ext): ExcelWriter(f, engine="odf", mode="a") -def test_kwargs(ext): - # GH 42286 - # GH 43445 - # test for error: OpenDocumentSpreadsheet does not accept any arguments - kwargs = {"kwarg": 1} - with tm.ensure_clean(ext) as f: - msg = re.escape("Use of **kwargs is deprecated") - error = re.escape( - "OpenDocumentSpreadsheet() got an unexpected keyword argument 'kwarg'" - ) - with pytest.raises( - TypeError, - match=error, - ): - with tm.assert_produces_warning(FutureWarning, match=msg): - with ExcelWriter(f, engine="odf", **kwargs) as _: - pass - - @pytest.mark.parametrize("engine_kwargs", [None, {"kwarg": 1}]) def test_engine_kwargs(ext, engine_kwargs): # GH 42286 diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py index 3b122c8572751..7351629660cee 100644 --- 
a/pandas/tests/io/excel/test_openpyxl.py +++ b/pandas/tests/io/excel/test_openpyxl.py @@ -86,19 +86,6 @@ def test_write_cells_merge_styled(ext): assert xcell_a2.font == openpyxl_sty_merged -@pytest.mark.parametrize("iso_dates", [True, False]) -def test_kwargs(ext, iso_dates): - # GH 42286 GH 43445 - kwargs = {"iso_dates": iso_dates} - with tm.ensure_clean(ext) as f: - msg = re.escape("Use of **kwargs is deprecated") - with tm.assert_produces_warning(FutureWarning, match=msg): - with ExcelWriter(f, engine="openpyxl", **kwargs) as writer: - assert writer.book.iso_dates == iso_dates - # ExcelWriter won't allow us to close without writing something - DataFrame().to_excel(writer) - - @pytest.mark.parametrize("iso_dates", [True, False]) def test_engine_kwargs_write(ext, iso_dates): # GH 42286 GH 43445 diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index 307f8b7a7798f..5efdde1392123 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -1337,21 +1337,6 @@ def assert_called_and_reset(cls): df.to_excel(filepath, engine="dummy") DummyClass.assert_called_and_reset() - @pytest.mark.parametrize( - "ext", - [ - pytest.param(".xlsx", marks=td.skip_if_no("xlsxwriter")), - pytest.param(".xlsx", marks=td.skip_if_no("openpyxl")), - pytest.param(".ods", marks=td.skip_if_no("odf")), - ], - ) - def test_engine_kwargs_and_kwargs_raises(self, ext): - # GH 40430 - msg = re.escape("Cannot use both engine_kwargs and **kwargs") - with pytest.raises(ValueError, match=msg): - with ExcelWriter("", engine_kwargs={"a": 1}, b=2): - pass - @td.skip_if_no("xlrd") @td.skip_if_no("openpyxl") diff --git a/pandas/tests/io/excel/test_xlsxwriter.py b/pandas/tests/io/excel/test_xlsxwriter.py index 82d47a13aefbc..477d3b05c0a74 100644 --- a/pandas/tests/io/excel/test_xlsxwriter.py +++ b/pandas/tests/io/excel/test_xlsxwriter.py @@ -1,5 +1,4 @@ import contextlib -import re import warnings import pytest @@ -65,17 +64,6 @@ def 
test_write_append_mode_raises(ext): ExcelWriter(f, engine="xlsxwriter", mode="a") -@pytest.mark.parametrize("nan_inf_to_errors", [True, False]) -def test_kwargs(ext, nan_inf_to_errors): - # GH 42286 - kwargs = {"options": {"nan_inf_to_errors": nan_inf_to_errors}} - with tm.ensure_clean(ext) as f: - msg = re.escape("Use of **kwargs is deprecated") - with tm.assert_produces_warning(FutureWarning, match=msg): - with ExcelWriter(f, engine="xlsxwriter", **kwargs) as writer: - assert writer.book.nan_inf_to_errors == nan_inf_to_errors - - @pytest.mark.parametrize("nan_inf_to_errors", [True, False]) def test_engine_kwargs(ext, nan_inf_to_errors): # GH 42286
Introduced in https://github.com/pandas-dev/pandas/pull/40430
https://api.github.com/repos/pandas-dev/pandas/pulls/49463
2022-11-02T00:41:31Z
2022-11-02T18:07:10Z
2022-11-02T18:07:10Z
2022-11-02T18:07:13Z
DEPR: Enforce melt(value_name) behavior
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 72f08ec90f5e8..e645d7ec91f68 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -280,6 +280,7 @@ Removal of prior version deprecations/changes - Enforced disallowing ``dict`` or ``set`` objects in ``suffixes`` in :func:`merge` (:issue:`34810`) - Enforced disallowing :func:`merge` to produce duplicated columns through the ``suffixes`` keyword and already existing columns (:issue:`22818`) - Enforced disallowing using :func:`merge` or :func:`join` on a different number of levels (:issue:`34862`) +- Enforced disallowing ``value_name`` argument in :func:`DataFrame.melt` to match an element in the :class:`DataFrame` columns (:issue:`35003`) - Removed setting Categorical._codes directly (:issue:`41429`) - Removed setting Categorical.categories directly (:issue:`47834`) - Removed argument ``inplace`` from :meth:`Categorical.add_categories`, :meth:`Categorical.remove_categories`, :meth:`Categorical.set_categories`, :meth:`Categorical.rename_categories`, :meth:`Categorical.reorder_categories`, :meth:`Categorical.set_ordered`, :meth:`Categorical.as_ordered`, :meth:`Categorical.as_unordered` (:issue:`37981`, :issue:`41118`, :issue:`41133`, :issue:`47834`) diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 539e585e01acc..300073d893c67 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -5,7 +5,6 @@ TYPE_CHECKING, Hashable, ) -import warnings import numpy as np @@ -13,7 +12,6 @@ Appender, deprecate_kwarg, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_extension_array_dtype, @@ -56,13 +54,9 @@ def melt( cols = list(frame.columns) if value_name in frame.columns: - warnings.warn( - "This dataframe has a column name that matches the 'value_name' column " - "name of the resulting Dataframe. 
" - "In the future this will raise an error, please set the 'value_name' " - "parameter of DataFrame.melt to a unique name.", - FutureWarning, - stacklevel=find_stack_level(), + raise ValueError( + f"value_name ({value_name}) cannot match an element in " + "the DataFrame columns." ) if id_vars is not None: diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 2013b3484ebff..fe88b7f9caa02 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -1,3 +1,5 @@ +import re + import numpy as np import pytest @@ -1073,19 +1075,16 @@ def test_col_substring_of_stubname(self): result = wide_to_long(wide_df, stubnames="PA", i=["node_id", "A"], j="time") tm.assert_frame_equal(result, expected) - def test_warn_of_column_name_value(self): - # GH34731 - # raise a warning if the resultant value column name matches + def test_raise_of_column_name_value(self): + # GH34731, enforced in 2.0 + # raise a ValueError if the resultant value column name matches # a name in the dataframe already (default name is "value") df = DataFrame({"col": list("ABC"), "value": range(10, 16, 2)}) - expected = DataFrame( - [["A", "col", "A"], ["B", "col", "B"], ["C", "col", "C"]], - columns=["value", "variable", "value"], - ) - with tm.assert_produces_warning(FutureWarning): - result = df.melt(id_vars="value") - tm.assert_frame_equal(result, expected) + with pytest.raises( + ValueError, match=re.escape("value_name (value) cannot match") + ): + df.melt(id_vars="value", value_name="value") @pytest.mark.parametrize("dtype", ["O", "string"]) def test_missing_stubname(self, dtype):
Introduced in https://github.com/pandas-dev/pandas/pull/35003
https://api.github.com/repos/pandas-dev/pandas/pulls/49462
2022-11-02T00:18:15Z
2022-11-04T16:54:54Z
2022-11-04T16:54:54Z
2022-11-04T16:54:57Z
DEPR: Remove check_less_precise in asserters
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 14b4df286d989..47b7de04d7f95 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -185,6 +185,7 @@ Removal of prior version deprecations/changes - Removed deprecated :meth:`.Styler.where` (:issue:`49397`) - Removed deprecated :meth:`.Styler.render` (:issue:`49397`) - Removed deprecated argument ``null_color`` in :meth:`.Styler.highlight_null` (:issue:`49397`) +- Removed deprecated argument ``check_less_precise`` in :meth:`.testing.assert_frame_equal`, :meth:`.testing.assert_extension_array_equal`, :meth:`.testing.assert_series_equal`, :meth:`.testing.assert_index_equal` (:issue:`30562`) - Removed deprecated ``null_counts`` argument in :meth:`DataFrame.info`. Use ``show_counts`` instead (:issue:`37999`) - Enforced deprecation disallowing passing a timezone-aware :class:`Timestamp` and ``dtype="datetime64[ns]"`` to :class:`Series` or :class:`DataFrame` constructors (:issue:`41555`) - Enforced deprecation disallowing passing a sequence of timezone-aware values and ``dtype="datetime64[ns]"`` to to :class:`Series` or :class:`DataFrame` constructors (:issue:`41555`) diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index 1f690b39e6fb8..d0a95e764472d 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -4,18 +4,12 @@ Literal, cast, ) -import warnings import numpy as np -from pandas._libs.lib import ( - NoDefault, - no_default, -) from pandas._libs.missing import is_matching_na from pandas._libs.sparse import SparseIndex import pandas._libs.testing as _testing -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_bool, @@ -64,7 +58,6 @@ def assert_almost_equal( left, right, check_dtype: bool | Literal["equiv"] = "equiv", - check_less_precise: bool | int | NoDefault = no_default, rtol: float = 1.0e-5, atol: float = 1.0e-8, **kwargs, @@ -83,20 +76,6 @@ def 
assert_almost_equal( Check dtype if both a and b are the same type. If 'equiv' is passed in, then `RangeIndex` and `Int64Index` are also considered equivalent when doing type checking. - check_less_precise : bool or int, default False - Specify comparison precision. 5 digits (False) or 3 digits (True) - after decimal points are compared. If int, then specify the number - of digits to compare. - - When comparing two numbers, if the first number has magnitude less - than 1e-5, we compare the two numbers directly and check whether - they are equivalent within the specified precision. Otherwise, we - compare the **ratio** of the second number to the first number and - check whether it is equivalent to 1 within the specified precision. - - .. deprecated:: 1.1.0 - Use `rtol` and `atol` instead to define relative/absolute - tolerance, respectively. Similar to :func:`math.isclose`. rtol : float, default 1e-5 Relative tolerance. @@ -106,16 +85,6 @@ def assert_almost_equal( .. versionadded:: 1.1.0 """ - if check_less_precise is not no_default: - warnings.warn( - "The 'check_less_precise' keyword in testing.assert_*_equal " - "is deprecated and will be removed in a future version. " - "You can stop passing 'check_less_precise' to silence this warning.", - FutureWarning, - stacklevel=find_stack_level(), - ) - rtol = atol = _get_tol_from_less_precise(check_less_precise) - if isinstance(left, Index): assert_index_equal( left, @@ -171,46 +140,6 @@ def assert_almost_equal( ) -def _get_tol_from_less_precise(check_less_precise: bool | int) -> float: - """ - Return the tolerance equivalent to the deprecated `check_less_precise` - parameter. - - Parameters - ---------- - check_less_precise : bool or int - - Returns - ------- - float - Tolerance to be used as relative/absolute tolerance. 
- - Examples - -------- - >>> # Using check_less_precise as a bool: - >>> _get_tol_from_less_precise(False) - 5e-06 - >>> _get_tol_from_less_precise(True) - 0.0005 - >>> # Using check_less_precise as an int representing the decimal - >>> # tolerance intended: - >>> _get_tol_from_less_precise(2) - 0.005 - >>> _get_tol_from_less_precise(8) - 5e-09 - """ - if isinstance(check_less_precise, bool): - if check_less_precise: - # 3-digit tolerance - return 0.5e-3 - else: - # 5-digit tolerance - return 0.5e-5 - else: - # Equivalent to setting checking_less_precise=<decimals> - return 0.5 * 10**-check_less_precise - - def _check_isinstance(left, right, cls): """ Helper method for our assert_* methods that ensures that @@ -250,7 +179,6 @@ def assert_index_equal( right: Index, exact: bool | str = "equiv", check_names: bool = True, - check_less_precise: bool | int | NoDefault = no_default, check_exact: bool = True, check_categorical: bool = True, check_order: bool = True, @@ -271,14 +199,6 @@ def assert_index_equal( Int64Index as well. check_names : bool, default True Whether to check the names attribute. - check_less_precise : bool or int, default False - Specify comparison precision. Only used when check_exact is False. - 5 digits (False) or 3 digits (True) after decimal points are compared. - If int, then specify the digits to compare. - - .. deprecated:: 1.1.0 - Use `rtol` and `atol` instead to define relative/absolute - tolerance, respectively. Similar to :func:`math.isclose`. check_exact : bool, default True Whether to compare number exactly. check_categorical : bool, default True @@ -333,16 +253,6 @@ def _get_ilevel_values(index, level): filled = take_nd(unique._values, level_codes, fill_value=unique._na_value) return unique._shallow_copy(filled, name=index.names[level]) - if check_less_precise is not no_default: - warnings.warn( - "The 'check_less_precise' keyword in testing.assert_*_equal " - "is deprecated and will be removed in a future version. 
" - "You can stop passing 'check_less_precise' to silence this warning.", - FutureWarning, - stacklevel=find_stack_level(), - ) - rtol = atol = _get_tol_from_less_precise(check_less_precise) - # instance validation _check_isinstance(left, right, Index) @@ -775,7 +685,6 @@ def assert_extension_array_equal( right, check_dtype: bool | Literal["equiv"] = True, index_values=None, - check_less_precise=no_default, check_exact: bool = False, rtol: float = 1.0e-5, atol: float = 1.0e-8, @@ -791,14 +700,6 @@ def assert_extension_array_equal( Whether to check if the ExtensionArray dtypes are identical. index_values : numpy.ndarray, default None Optional index (shared by both left and right), used in output. - check_less_precise : bool or int, default False - Specify comparison precision. Only used when check_exact is False. - 5 digits (False) or 3 digits (True) after decimal points are compared. - If int, then specify the digits to compare. - - .. deprecated:: 1.1.0 - Use `rtol` and `atol` instead to define relative/absolute - tolerance, respectively. Similar to :func:`math.isclose`. check_exact : bool, default False Whether to compare number exactly. rtol : float, default 1e-5 @@ -823,16 +724,6 @@ def assert_extension_array_equal( >>> b, c = a.array, a.array >>> tm.assert_extension_array_equal(b, c) """ - if check_less_precise is not no_default: - warnings.warn( - "The 'check_less_precise' keyword in testing.assert_*_equal " - "is deprecated and will be removed in a future version. 
" - "You can stop passing 'check_less_precise' to silence this warning.", - FutureWarning, - stacklevel=find_stack_level(), - ) - rtol = atol = _get_tol_from_less_precise(check_less_precise) - assert isinstance(left, ExtensionArray), "left is not an ExtensionArray" assert isinstance(right, ExtensionArray), "right is not an ExtensionArray" if check_dtype: @@ -881,7 +772,6 @@ def assert_series_equal( check_dtype: bool | Literal["equiv"] = True, check_index_type: bool | Literal["equiv"] = "equiv", check_series_type: bool = True, - check_less_precise: bool | int | NoDefault = no_default, check_names: bool = True, check_exact: bool = False, check_datetimelike_compat: bool = False, @@ -910,20 +800,6 @@ def assert_series_equal( are identical. check_series_type : bool, default True Whether to check the Series class is identical. - check_less_precise : bool or int, default False - Specify comparison precision. Only used when check_exact is False. - 5 digits (False) or 3 digits (True) after decimal points are compared. - If int, then specify the digits to compare. - - When comparing two numbers, if the first number has magnitude less - than 1e-5, we compare the two numbers directly and check whether - they are equivalent within the specified precision. Otherwise, we - compare the **ratio** of the second number to the first number and - check whether it is equivalent to 1 within the specified precision. - - .. deprecated:: 1.1.0 - Use `rtol` and `atol` instead to define relative/absolute - tolerance, respectively. Similar to :func:`math.isclose`. check_names : bool, default True Whether to check the Series and Index names attribute. 
check_exact : bool, default False @@ -978,16 +854,6 @@ def assert_series_equal( if not check_index and check_like: raise ValueError("check_like must be False if check_index is False") - if check_less_precise is not no_default: - warnings.warn( - "The 'check_less_precise' keyword in testing.assert_*_equal " - "is deprecated and will be removed in a future version. " - "You can stop passing 'check_less_precise' to silence this warning.", - FutureWarning, - stacklevel=find_stack_level(), - ) - rtol = atol = _get_tol_from_less_precise(check_less_precise) - # instance validation _check_isinstance(left, right, Series) @@ -1150,7 +1016,6 @@ def assert_frame_equal( check_index_type: bool | Literal["equiv"] = "equiv", check_column_type: bool | Literal["equiv"] = "equiv", check_frame_type: bool = True, - check_less_precise=no_default, check_names: bool = True, by_blocks: bool = False, check_exact: bool = False, @@ -1188,20 +1053,6 @@ def assert_frame_equal( :func:`assert_index_equal`. check_frame_type : bool, default True Whether to check the DataFrame class is identical. - check_less_precise : bool or int, default False - Specify comparison precision. Only used when check_exact is False. - 5 digits (False) or 3 digits (True) after decimal points are compared. - If int, then specify the digits to compare. - - When comparing two numbers, if the first number has magnitude less - than 1e-5, we compare the two numbers directly and check whether - they are equivalent within the specified precision. Otherwise, we - compare the **ratio** of the second number to the first number and - check whether it is equivalent to 1 within the specified precision. - - .. deprecated:: 1.1.0 - Use `rtol` and `atol` instead to define relative/absolute - tolerance, respectively. Similar to :func:`math.isclose`. check_names : bool, default True Whether to check that the `names` attribute for both the `index` and `column` attributes of the DataFrame is identical. 
@@ -1271,16 +1122,6 @@ def assert_frame_equal( """ __tracebackhide__ = True - if check_less_precise is not no_default: - warnings.warn( - "The 'check_less_precise' keyword in testing.assert_*_equal " - "is deprecated and will be removed in a future version. " - "You can stop passing 'check_less_precise' to silence this warning.", - FutureWarning, - stacklevel=find_stack_level(), - ) - rtol = atol = _get_tol_from_less_precise(check_less_precise) - # instance validation _check_isinstance(left, right, DataFrame) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 605dfb2551410..8d4d705296f35 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -228,8 +228,7 @@ def sem(x): check_dates=True, ) - # GH#32571 check_less_precise is needed on apparently-random - # py37-npdev builds and OSX-PY36-min_version builds + # GH#32571: rol needed for flaky CI builds # mixed types (with upcasting happening) assert_stat_op_calc( "sum", diff --git a/pandas/tests/util/test_assert_almost_equal.py b/pandas/tests/util/test_assert_almost_equal.py index ab53707771be6..ba52536e246d0 100644 --- a/pandas/tests/util/test_assert_almost_equal.py +++ b/pandas/tests/util/test_assert_almost_equal.py @@ -69,16 +69,6 @@ def _assert_not_almost_equal_both(a, b, **kwargs): _assert_not_almost_equal(b, a, **kwargs) -@pytest.mark.parametrize( - "a,b,check_less_precise", - [(1.1, 1.1, False), (1.1, 1.100001, True), (1.1, 1.1001, 2)], -) -def test_assert_almost_equal_deprecated(a, b, check_less_precise): - # GH#30562 - with tm.assert_produces_warning(FutureWarning): - _assert_almost_equal_both(a, b, check_less_precise=check_less_precise) - - @pytest.mark.parametrize( "a,b", [ @@ -122,7 +112,7 @@ def test_assert_not_almost_equal_numbers(a, b): ], ) def test_assert_almost_equal_numbers_atol(a, b): - # Equivalent to the deprecated check_less_precise=True + # Equivalent to the deprecated check_less_precise=True, enforced in 
2.0 _assert_almost_equal_both(a, b, rtol=0.5e-3, atol=0.5e-3)
Introduced in https://github.com/pandas-dev/pandas/pull/30562
https://api.github.com/repos/pandas-dev/pandas/pulls/49461
2022-11-02T00:02:58Z
2022-11-02T14:18:09Z
2022-11-02T14:18:09Z
2022-11-02T16:17:52Z
(🎁) add python 3.11 to sdist.yml
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index 14cede7bc1a39..7c20545105009 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -30,7 +30,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10"] + python-version: ["3.8", "3.9", "3.10", "3.11"] concurrency: # https://github.community/t/concurrecy-not-work-for-push/183068/7 group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{matrix.python-version}}-sdist @@ -42,7 +42,7 @@ jobs: fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} @@ -86,6 +86,8 @@ jobs: pip install numpy==1.20.3 ;; 3.10) pip install numpy==1.21.2 ;; + 3.11) + pip install numpy==1.23.2 ;; esac - name: Import pandas
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49460
2022-11-01T23:32:50Z
2022-11-02T10:41:50Z
2022-11-02T10:41:50Z
2022-11-02T10:42:00Z
DEPR: inplace argument in set_axis
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 14b4df286d989..30142aa4a7f64 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -199,6 +199,7 @@ Removal of prior version deprecations/changes - Removed ``keep_tz`` argument in :meth:`DatetimeIndex.to_series` (:issue:`29731`) - Remove arguments ``names`` and ``dtype`` from :meth:`Index.copy` and ``levels`` and ``codes`` from :meth:`MultiIndex.copy` (:issue:`35853`, :issue:`36685`) - Remove argument ``inplace`` from :meth:`MultiIndex.set_levels` and :meth:`MultiIndex.set_codes` (:issue:`35626`) +- Removed argument ``inplace`` from :meth:`DataFrame.set_axis` and :meth:`Series.set_axis`, use ``obj = obj.set_axis(..., copy=False)`` instead (:issue:`48130`) - Disallow passing positional arguments to :meth:`MultiIndex.set_levels` and :meth:`MultiIndex.set_codes` (:issue:`41485`) - Removed :meth:`MultiIndex.is_lexsorted` and :meth:`MultiIndex.lexsort_depth` (:issue:`38701`) - Removed argument ``how`` from :meth:`PeriodIndex.astype`, use :meth:`PeriodIndex.to_timestamp` instead (:issue:`37982`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 044c40c58b85c..58859054943b3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5046,39 +5046,6 @@ def align( broadcast_axis=broadcast_axis, ) - @overload - def set_axis( - self, - labels, - *, - axis: Axis = ..., - inplace: Literal[False] | lib.NoDefault = ..., - copy: bool | lib.NoDefault = ..., - ) -> DataFrame: - ... - - @overload - def set_axis( - self, - labels, - *, - axis: Axis = ..., - inplace: Literal[True], - copy: bool | lib.NoDefault = ..., - ) -> None: - ... - - @overload - def set_axis( - self, - labels, - *, - axis: Axis = ..., - inplace: bool | lib.NoDefault = ..., - copy: bool | lib.NoDefault = ..., - ) -> DataFrame | None: - ... 
- # error: Signature of "set_axis" incompatible with supertype "NDFrame" @Appender( """ @@ -5123,10 +5090,9 @@ def set_axis( labels, *, axis: Axis = 0, - inplace: bool | lib.NoDefault = lib.no_default, - copy: bool | lib.NoDefault = lib.no_default, - ): - return super().set_axis(labels, axis=axis, inplace=inplace, copy=copy) + copy: bool = True, + ) -> DataFrame: + return super().set_axis(labels, axis=axis, copy=copy) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.reindex.__doc__) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ec107216f14a2..f88fe35360074 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -695,47 +695,13 @@ def size(self) -> int: # expected "int") [return-value] return np.prod(self.shape) # type: ignore[return-value] - @overload - def set_axis( - self: NDFrameT, - labels, - *, - axis: Axis = ..., - inplace: Literal[False] | lib.NoDefault = ..., - copy: bool_t | lib.NoDefault = ..., - ) -> NDFrameT: - ... - - @overload - def set_axis( - self, - labels, - *, - axis: Axis = ..., - inplace: Literal[True], - copy: bool_t | lib.NoDefault = ..., - ) -> None: - ... - - @overload - def set_axis( - self: NDFrameT, - labels, - *, - axis: Axis = ..., - inplace: bool_t | lib.NoDefault = ..., - copy: bool_t | lib.NoDefault = ..., - ) -> NDFrameT | None: - ... - def set_axis( self: NDFrameT, labels, *, axis: Axis = 0, - inplace: bool_t | lib.NoDefault = lib.no_default, - copy: bool_t | lib.NoDefault = lib.no_default, - ) -> NDFrameT | None: + copy: bool_t = True, + ) -> NDFrameT: """ Assign desired index to given axis. @@ -751,11 +717,6 @@ def set_axis( The axis to update. The value 0 identifies the rows. For `Series` this parameter is unused and defaults to 0. - inplace : bool, default False - Whether to return a new %(klass)s instance. - - .. deprecated:: 1.5.0 - copy : bool, default True Whether to make a copy of the underlying data. 
@@ -763,33 +724,14 @@ def set_axis( Returns ------- - renamed : %(klass)s or None - An object of type %(klass)s or None if ``inplace=True``. + renamed : %(klass)s + An object of type %(klass)s. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ - if inplace is not lib.no_default: - warnings.warn( - f"{type(self).__name__}.set_axis 'inplace' keyword is deprecated " - "and will be removed in a future version. Use " - "`obj = obj.set_axis(..., copy=False)` instead", - FutureWarning, - stacklevel=find_stack_level(), - ) - else: - inplace = False - - if inplace: - if copy is True: - raise ValueError("Cannot specify both inplace=True and copy=True") - copy = False - elif copy is lib.no_default: - copy = True - - self._check_inplace_and_allows_duplicate_labels(inplace) - return self._set_axis_nocheck(labels, axis, inplace, copy=copy) + return self._set_axis_nocheck(labels, axis, inplace=False, copy=copy) @final def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool_t, copy: bool_t): diff --git a/pandas/core/series.py b/pandas/core/series.py index 9607d57766b11..dfa540f3acf0c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4925,39 +4925,6 @@ def rename( else: return self._set_name(index, inplace=inplace) - @overload - def set_axis( - self, - labels, - *, - axis: Axis = ..., - inplace: Literal[False] | lib.NoDefault = ..., - copy: bool | lib.NoDefault = ..., - ) -> Series: - ... - - @overload - def set_axis( - self, - labels, - *, - axis: Axis = ..., - inplace: Literal[True], - copy: bool | lib.NoDefault = ..., - ) -> None: - ... - - @overload - def set_axis( - self, - labels, - *, - axis: Axis = ..., - inplace: bool | lib.NoDefault = ..., - copy: bool | lib.NoDefault = ..., - ) -> Series | None: - ... 
- # error: Signature of "set_axis" incompatible with supertype "NDFrame" @Appender( """ @@ -4984,15 +4951,14 @@ def set_axis( see_also_sub="", ) @Appender(NDFrame.set_axis.__doc__) - def set_axis( # type: ignore[override] + def set_axis( self, labels, *, axis: Axis = 0, - inplace: bool | lib.NoDefault = lib.no_default, - copy: bool | lib.NoDefault = lib.no_default, - ) -> Series | None: - return super().set_axis(labels, axis=axis, inplace=inplace, copy=copy) + copy: bool = True, + ) -> Series: + return super().set_axis(labels, axis=axis, copy=copy) # error: Cannot determine type of 'reindex' @doc( diff --git a/pandas/tests/frame/methods/test_set_axis.py b/pandas/tests/frame/methods/test_set_axis.py index 8e597e1e9fa69..7efd4434f8412 100644 --- a/pandas/tests/frame/methods/test_set_axis.py +++ b/pandas/tests/frame/methods/test_set_axis.py @@ -16,14 +16,9 @@ def obj(self): def test_set_axis(self, obj): # GH14636; this tests setting index for both Series and DataFrame new_index = list("abcd")[: len(obj)] - expected = obj.copy() expected.index = new_index - - # inplace=False - msg = "set_axis 'inplace' keyword is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = obj.set_axis(new_index, axis=0, inplace=False) + result = obj.set_axis(new_index, axis=0) tm.assert_equal(expected, result) def test_set_axis_copy(self, obj): @@ -34,12 +29,6 @@ def test_set_axis_copy(self, obj): expected = obj.copy() expected.index = new_index - with pytest.raises( - ValueError, match="Cannot specify both inplace=True and copy=True" - ): - with tm.assert_produces_warning(FutureWarning): - obj.set_axis(new_index, axis=0, inplace=True, copy=True) - result = obj.set_axis(new_index, axis=0, copy=True) tm.assert_equal(expected, result) assert result is not obj @@ -77,40 +66,17 @@ def test_set_axis_copy(self, obj): for i in range(obj.shape[1]) ) - # Do this last since it alters obj inplace - with tm.assert_produces_warning(FutureWarning): - res = 
obj.set_axis(new_index, inplace=True, copy=False) - assert res is None - tm.assert_equal(expected, obj) + res = obj.set_axis(new_index, copy=False) + tm.assert_equal(expected, res) # check we did NOT make a copy - if obj.ndim == 1: - assert tm.shares_memory(obj, orig) + if res.ndim == 1: + assert tm.shares_memory(res, orig) else: assert all( - tm.shares_memory(obj.iloc[:, i], orig.iloc[:, i]) - for i in range(obj.shape[1]) + tm.shares_memory(res.iloc[:, i], orig.iloc[:, i]) + for i in range(res.shape[1]) ) - @pytest.mark.parametrize("axis", [0, "index", 1, "columns"]) - def test_set_axis_inplace_axis(self, axis, obj): - # GH#14636 - if obj.ndim == 1 and axis in [1, "columns"]: - # Series only has [0, "index"] - return - - new_index = list("abcd")[: len(obj)] - - expected = obj.copy() - if axis in [0, "index"]: - expected.index = new_index - else: - expected.columns = new_index - - result = obj.copy() - with tm.assert_produces_warning(FutureWarning): - result.set_axis(new_index, axis=axis, inplace=True) - tm.assert_equal(result, expected) - def test_set_axis_unnamed_kwarg_warns(self, obj): # omitting the "axis" parameter new_index = list("abcd")[: len(obj)] @@ -118,10 +84,7 @@ def test_set_axis_unnamed_kwarg_warns(self, obj): expected = obj.copy() expected.index = new_index - with tm.assert_produces_warning( - FutureWarning, match="set_axis 'inplace' keyword" - ): - result = obj.set_axis(new_index, inplace=False) + result = obj.set_axis(new_index) tm.assert_equal(result, expected) @pytest.mark.parametrize("axis", [3, "foo"]) diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py index c9036958cbd74..d6d5c29e6d888 100644 --- a/pandas/tests/generic/test_duplicate_labels.py +++ b/pandas/tests/generic/test_duplicate_labels.py @@ -414,7 +414,6 @@ def test_dataframe_insert_raises(): "method, frame_only", [ (operator.methodcaller("set_index", "A", inplace=True), True), - (operator.methodcaller("set_axis", ["A", "B"], 
inplace=True), False), (operator.methodcaller("reset_index", inplace=True), True), (operator.methodcaller("rename", lambda x: x, inplace=True), False), ], @@ -427,19 +426,11 @@ def test_inplace_raises(method, frame_only): s.flags.allows_duplicate_labels = False msg = "Cannot specify" - warn_msg = "Series.set_axis 'inplace' keyword" - if "set_axis" in str(method): - warn = FutureWarning - else: - warn = None - with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(warn, match=warn_msg): - method(df) + method(df) if not frame_only: with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(warn, match=warn_msg): - method(s) + method(s) def test_pickle():
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v2.0.0.rst` file if fixing a bug or adding a new feature. introduced in #48130
https://api.github.com/repos/pandas-dev/pandas/pulls/49459
2022-11-01T23:29:50Z
2022-11-02T18:51:55Z
2022-11-02T18:51:55Z
2022-11-03T01:38:16Z
DEPR: Index(ndarray[object]) inferring numeric dtype
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 92cc7b04496bc..3db616e9b8cea 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -293,6 +293,7 @@ Removal of prior version deprecations/changes - Changed the behavior of :func:`to_datetime` with argument "now" with ``utc=False`` to match ``Timestamp("now")`` (:issue:`18705`) - Changed behavior of :meth:`SparseArray.astype` when given a dtype that is not explicitly ``SparseDtype``, cast to the exact requested dtype rather than silently using a ``SparseDtype`` instead (:issue:`34457`) - Changed behavior of :class:`DataFrame` constructor given floating-point ``data`` and an integer ``dtype``, when the data cannot be cast losslessly, the floating point dtype is retained, matching :class:`Series` behavior (:issue:`41170`) +- Changed behavior of :class:`Index` constructor when given a ``np.ndarray`` with object-dtype containing numeric entries; this now retains object dtype rather than inferring a numeric dtype, consistent with :class:`Series` behavior (:issue:`42870`) - Changed behavior of :class:`DataFrame` constructor when passed a ``dtype`` (other than int) that the data cannot be cast to; it now raises instead of silently ignoring the dtype (:issue:`41733`) - Changed the behavior of :class:`Series` constructor, it will no longer infer a datetime64 or timedelta64 dtype from string entries (:issue:`41731`) - Changed behavior of :class:`Timestamp` constructor with a ``np.datetime64`` object and a ``tz`` passed to interpret the input as a wall-time as opposed to a UTC time (:issue:`42288`) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index d8300bb29c274..8829ab0fa6805 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -654,9 +654,7 @@ def _with_infer(cls, *args, **kwargs): Constructor that uses the 1.0.x behavior inferring numeric dtypes for ndarray[object] inputs. 
""" - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", ".*the Index constructor", FutureWarning) - result = cls(*args, **kwargs) + result = cls(*args, **kwargs) if result.dtype == _dtype_obj and not result._is_multi: # error: Argument 1 to "maybe_convert_objects" has incompatible type @@ -7220,13 +7218,8 @@ def _maybe_cast_data_without_dtype( if not cast_numeric_deprecated: # i.e. we started with a list, not an ndarray[object] return result + return subarr - warnings.warn( - "In a future version, the Index constructor will not infer numeric " - "dtypes when passed object-dtype sequences (matching Series behavior)", - FutureWarning, - stacklevel=find_stack_level(), - ) result = ensure_wrapped_if_datetimelike(result) return result diff --git a/pandas/tests/indexes/numeric/test_numeric.py b/pandas/tests/indexes/numeric/test_numeric.py index dd62ad8b31fae..4a6fc3a42b3ee 100644 --- a/pandas/tests/indexes/numeric/test_numeric.py +++ b/pandas/tests/indexes/numeric/test_numeric.py @@ -139,8 +139,8 @@ def test_constructor_coerce(self, mixed_index, float_index): self.check_coerce(mixed_index, Index([1.5, 2, 3, 4, 5])) self.check_coerce(float_index, Index(np.arange(5) * 2.5)) - with tm.assert_produces_warning(FutureWarning, match="will not infer"): - result = Index(np.array(np.arange(5) * 2.5, dtype=object)) + result = Index(np.array(np.arange(5) * 2.5, dtype=object)) + assert result.dtype == object # as of 2.0 to match Series self.check_coerce(float_index, result.astype("float64")) def test_constructor_explicit(self, mixed_index, float_index): @@ -479,12 +479,13 @@ def test_constructor_corner(self, dtype): assert index.values.dtype == index.dtype if dtype == np.int64: - msg = "will not infer" - with tm.assert_produces_warning(FutureWarning, match=msg): - without_dtype = Index(arr) + without_dtype = Index(arr) + # as of 2.0 we do not infer a dtype when we get an object-dtype + # ndarray of numbers, matching Series behavior + assert without_dtype.dtype == 
object exact = True if index_cls is Int64Index else "equiv" - tm.assert_index_equal(index, without_dtype, exact=exact) + tm.assert_index_equal(index, without_dtype.astype(np.int64), exact=exact) # preventing casting arr = np.array([1, "2", 3, "4"], dtype=object) diff --git a/pandas/tests/indexes/ranges/test_constructors.py b/pandas/tests/indexes/ranges/test_constructors.py index c4f26220f87d1..74bcaa8529ffc 100644 --- a/pandas/tests/indexes/ranges/test_constructors.py +++ b/pandas/tests/indexes/ranges/test_constructors.py @@ -148,8 +148,7 @@ def test_constructor_corner(self): arr = np.array([1, 2, 3, 4], dtype=object) index = RangeIndex(1, 5) assert index.values.dtype == np.int64 - with tm.assert_produces_warning(FutureWarning, match="will not infer"): - expected = Index(arr).astype("int64") + expected = Index(arr).astype("int64") tm.assert_index_equal(index, expected, exact="equiv")
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49458
2022-11-01T23:12:59Z
2022-11-02T18:15:32Z
2022-11-02T18:15:32Z
2022-11-02T18:53:48Z
DEPR: Index methods, to_time, Categorical constructor
diff --git a/asv_bench/benchmarks/index_cached_properties.py b/asv_bench/benchmarks/index_cached_properties.py index 1a88bb7eef37a..349841f695416 100644 --- a/asv_bench/benchmarks/index_cached_properties.py +++ b/asv_bench/benchmarks/index_cached_properties.py @@ -70,6 +70,3 @@ def time_engine(self, index_type): def time_inferred_type(self, index_type): self.idx.inferred_type - - def time_is_all_dates(self, index_type): - self.idx.is_all_dates diff --git a/doc/redirects.csv b/doc/redirects.csv index d0f4ae331f7e3..f0fab09196f26 100644 --- a/doc/redirects.csv +++ b/doc/redirects.csv @@ -661,7 +661,6 @@ generated/pandas.Index.identical,../reference/api/pandas.Index.identical generated/pandas.Index.inferred_type,../reference/api/pandas.Index.inferred_type generated/pandas.Index.insert,../reference/api/pandas.Index.insert generated/pandas.Index.intersection,../reference/api/pandas.Index.intersection -generated/pandas.Index.is_all_dates,../reference/api/pandas.Index.is_all_dates generated/pandas.Index.is_boolean,../reference/api/pandas.Index.is_boolean generated/pandas.Index.is_categorical,../reference/api/pandas.Index.is_categorical generated/pandas.Index.is_floating,../reference/api/pandas.Index.is_floating @@ -671,7 +670,6 @@ generated/pandas.Index.is_integer,../reference/api/pandas.Index.is_integer generated/pandas.Index.is_interval,../reference/api/pandas.Index.is_interval generated/pandas.Index.is_lexsorted_for_tuple,../reference/api/pandas.Index.is_lexsorted_for_tuple generated/pandas.Index.is_monotonic_decreasing,../reference/api/pandas.Index.is_monotonic_decreasing -generated/pandas.Index.is_monotonic,../reference/api/pandas.Index.is_monotonic generated/pandas.Index.is_monotonic_increasing,../reference/api/pandas.Index.is_monotonic_increasing generated/pandas.Index.isna,../reference/api/pandas.Index.isna generated/pandas.Index.isnull,../reference/api/pandas.Index.isnull @@ -1068,7 +1066,6 @@ 
generated/pandas.Series.interpolate,../reference/api/pandas.Series.interpolate generated/pandas.Series.is_copy,../reference/api/pandas.Series.is_copy generated/pandas.Series.isin,../reference/api/pandas.Series.isin generated/pandas.Series.is_monotonic_decreasing,../reference/api/pandas.Series.is_monotonic_decreasing -generated/pandas.Series.is_monotonic,../reference/api/pandas.Series.is_monotonic generated/pandas.Series.is_monotonic_increasing,../reference/api/pandas.Series.is_monotonic_increasing generated/pandas.Series.isna,../reference/api/pandas.Series.isna generated/pandas.Series.isnull,../reference/api/pandas.Series.isnull diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst index b7866a0076d84..81148b4a29df5 100644 --- a/doc/source/reference/indexing.rst +++ b/doc/source/reference/indexing.rst @@ -25,7 +25,6 @@ Properties :toctree: api/ Index.values - Index.is_monotonic Index.is_monotonic_increasing Index.is_monotonic_decreasing Index.is_unique @@ -33,7 +32,6 @@ Properties Index.hasnans Index.dtype Index.inferred_type - Index.is_all_dates Index.shape Index.name Index.names diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst index c9604f48dd334..c8bbe922f5313 100644 --- a/doc/source/reference/series.rst +++ b/doc/source/reference/series.rst @@ -167,7 +167,6 @@ Computations / descriptive stats Series.unique Series.nunique Series.is_unique - Series.is_monotonic Series.is_monotonic_increasing Series.is_monotonic_decreasing Series.value_counts diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 04e5154ca1a0b..f7b4b20ddba22 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -159,6 +159,7 @@ Deprecations Removal of prior version deprecations/changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Removed deprecated module ``pandas.core.index`` (:issue:`30193`) +- Removed deprecated alias ``pandas.core.tools.datetimes.to_time``, import the 
function directly from ``pandas.core.tools.times`` instead (:issue:`34145`) - Removed deprecated :meth:`Categorical.to_dense`, use ``np.asarray(cat)`` instead (:issue:`32639`) - Removed deprecated :meth:`Categorical.take_nd` (:issue:`27745`) - Removed deprecated :meth:`Categorical.mode`, use ``Series(cat).mode()`` instead (:issue:`45033`) @@ -182,6 +183,8 @@ Removal of prior version deprecations/changes - Removed deprecated :meth:`.Styler.render` (:issue:`49397`) - Removed deprecated argument ``null_color`` in :meth:`.Styler.highlight_null` (:issue:`49397`) - Removed deprecated ``null_counts`` argument in :meth:`DataFrame.info`. Use ``show_counts`` instead (:issue:`37999`) +- Removed deprecated :meth:`Index.is_monotonic`, and :meth:`Series.is_monotonic`; use ``obj.is_monotonic_increasing`` instead (:issue:`45422`) +- Removed deprecated :meth:`Index.is_all_dates` (:issue:`36697`) - Enforced deprecation disallowing passing a timezone-aware :class:`Timestamp` and ``dtype="datetime64[ns]"`` to :class:`Series` or :class:`DataFrame` constructors (:issue:`41555`) - Enforced deprecation disallowing passing a sequence of timezone-aware values and ``dtype="datetime64[ns]"`` to to :class:`Series` or :class:`DataFrame` constructors (:issue:`41555`) - Enforced deprecation disallowing unit-less "datetime64" dtype in :meth:`Series.astype` and :meth:`DataFrame.astype` (:issue:`47844`) @@ -245,6 +248,8 @@ Removal of prior version deprecations/changes - Disallow passing non-keyword arguments to :meth:`DataFrame.replace`, :meth:`Series.replace` except for ``to_replace`` and ``value`` (:issue:`47587`) - Disallow passing non-keyword arguments to :meth:`DataFrame.sort_values` except for ``by`` (:issue:`41505`) - Disallow passing non-keyword arguments to :meth:`Series.sort_values` (:issue:`41505`) +- Disallowed constructing :class:`Categorical` with scalar ``data`` (:issue:`38433`) +- Disallowed constructing :class:`CategoricalIndex` without passing ``data`` (:issue:`38944`) - Removed 
:meth:`.Rolling.validate`, :meth:`.Expanding.validate`, and :meth:`.ExponentialMovingWindow.validate` (:issue:`43665`) - Removed :attr:`Rolling.win_type` returning ``"freq"`` (:issue:`38963`) - Removed :attr:`Rolling.is_datetimelike` (:issue:`38963`) @@ -286,6 +291,8 @@ Removal of prior version deprecations/changes - Changed behavior of empty data passed into :class:`Series`; the default dtype will be ``object`` instead of ``float64`` (:issue:`29405`) - Changed the behavior of :func:`to_datetime` with argument "now" with ``utc=False`` to match ``Timestamp("now")`` (:issue:`18705`) - Changed behavior of :meth:`SparseArray.astype` when given a dtype that is not explicitly ``SparseDtype``, cast to the exact requested dtype rather than silently using a ``SparseDtype`` instead (:issue:`34457`) +- Changed behavior of :meth:`Index.ravel` to return a view on the original :class:`Index` instead of a ``np.ndarray`` (:issue:`36900`) +- Changed behavior of :meth:`Index.to_frame` with explicit ``name=None`` to use ``None`` for the column name instead of the index's name or default ``0`` (:issue:`45523`) - Changed behavior of :class:`DataFrame` constructor given floating-point ``data`` and an integer ``dtype``, when the data cannot be cast losslessly, the floating point dtype is retained, matching :class:`Series` behavior (:issue:`41170`) - Changed behavior of :class:`DataFrame` constructor when passed a ``dtype`` (other than int) that the data cannot be cast to; it now raises instead of silently ignoring the dtype (:issue:`41733`) - Changed the behavior of :class:`Series` constructor, it will no longer infer a datetime64 or timedelta64 dtype from string entries (:issue:`41731`) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index becca2b668290..4cacdb71f4175 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -15,7 +15,6 @@ cast, overload, ) -from warnings import warn import numpy as np @@ -40,7 +39,6 @@ 
type_t, ) from pandas.compat.numpy import function as nv -from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -384,13 +382,7 @@ def __init__( if not is_list_like(values): # GH#38433 - warn( - "Allowing scalars in the Categorical constructor is deprecated " - "and will raise in a future version. Use `[value]` instead", - FutureWarning, - stacklevel=find_stack_level(), - ) - values = [values] + raise TypeError("Categorical input must be list-like") # null_mask indicates missing values we want to exclude from inference. # This means: only missing values in list-likes (not arrays/ndframes). diff --git a/pandas/core/base.py b/pandas/core/base.py index db81d3e396b50..ca310109ab2e6 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -17,7 +17,6 @@ final, overload, ) -import warnings import numpy as np @@ -38,7 +37,6 @@ cache_readonly, doc, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, @@ -1056,27 +1054,6 @@ def is_unique(self) -> bool: """ return self.nunique(dropna=False) == len(self) - @property - def is_monotonic(self) -> bool: - """ - Return boolean if values in the object are monotonically increasing. - - .. deprecated:: 1.5.0 - is_monotonic is deprecated and will be removed in a future version. - Use is_monotonic_increasing instead. - - Returns - ------- - bool - """ - warnings.warn( - "is_monotonic is deprecated and will be removed in a future version. 
" - "Use is_monotonic_increasing instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - return self.is_monotonic_increasing - @property def is_monotonic_increasing(self) -> bool: """ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index d8300bb29c274..135d6818c7bdd 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -941,34 +941,19 @@ def dtype(self) -> DtypeObj: return self._data.dtype @final - def ravel(self, order: str_t = "C"): + def ravel(self, order: str_t = "C") -> Index: """ - Return an ndarray of the flattened values of the underlying data. + Return a view on self. Returns ------- - numpy.ndarray - Flattened array. + Index See Also -------- numpy.ndarray.ravel : Return a flattened array. """ - warnings.warn( - "Index.ravel returning ndarray is deprecated; in a future version " - "this will return a view on self.", - FutureWarning, - stacklevel=find_stack_level(), - ) - if needs_i8_conversion(self.dtype): - # Item "ndarray[Any, Any]" of "Union[ExtensionArray, ndarray[Any, Any]]" - # has no attribute "_ndarray" - values = self._data._ndarray # type: ignore[union-attr] - elif is_interval_dtype(self.dtype): - values = np.asarray(self._data) - else: - values = self._get_engine_target() - return values.ravel(order=order) + return self[:] def view(self, cls=None): @@ -1555,7 +1540,7 @@ def to_frame( index : bool, default True Set the index of the returned DataFrame as the original Index. - name : object, default None + name : object, defaults to index.name The passed name should substitute for the index name (if it has one). @@ -1597,17 +1582,6 @@ def to_frame( """ from pandas import DataFrame - if name is None: - warnings.warn( - "Explicitly passing `name=None` currently preserves the Index's name " - "or uses a default name of 0. 
This behaviour is deprecated, and in " - "the future `None` will be used as the name of the resulting " - "DataFrame column.", - FutureWarning, - stacklevel=find_stack_level(), - ) - name = lib.no_default - if name is lib.no_default: name = self._get_level_names() result = DataFrame({name: self._values.copy()}) @@ -2200,24 +2174,6 @@ def _can_hold_na(self) -> bool: return False return True - @final - @property - def is_monotonic(self) -> bool: - """ - Alias for is_monotonic_increasing. - - .. deprecated:: 1.5.0 - is_monotonic is deprecated and will be removed in a future version. - Use is_monotonic_increasing instead. - """ - warnings.warn( - "is_monotonic is deprecated and will be removed in a future version. " - "Use is_monotonic_increasing instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - return self.is_monotonic_increasing - @property def is_monotonic_increasing(self) -> bool: """ @@ -2629,20 +2585,6 @@ def _is_all_dates(self) -> bool: return False return is_datetime_array(ensure_object(self._values)) - @cache_readonly - @final - def is_all_dates(self) -> bool: - """ - Whether or not the index values only consist of dates. - """ - warnings.warn( - "Index.is_all_dates is deprecated, will be removed in a future version. " - "check index.inferred_type instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - return self._is_all_dates - @final @cache_readonly def _is_multi(self) -> bool: diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 58b533cb576d9..662e026495631 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -216,18 +216,8 @@ def __new__( name = maybe_extract_name(name, data, cls) - if data is None: - # GH#38944 - warnings.warn( - "Constructing a CategoricalIndex without passing data is " - "deprecated and will raise in a future version. " - "Use CategoricalIndex([], ...) 
instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - data = [] - if is_scalar(data): + # GH#38944 include None here, which pre-2.0 subbed in [] cls._raise_scalar_data_error(data) data = Categorical( diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 3b8380a88bb8b..33f29de1d1a90 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1738,17 +1738,6 @@ def to_frame( """ from pandas import DataFrame - if name is None: - warnings.warn( - "Explicitly passing `name=None` currently preserves the Index's name " - "or uses a default name of 0. This behaviour is deprecated, and in " - "the future `None` will be used as the name of the resulting " - "DataFrame column.", - FutureWarning, - stacklevel=find_stack_level(), - ) - name = lib.no_default - if name is not lib.no_default: if not is_list_like(name): raise TypeError("'name' must be a list / sequence of column names.") diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 22406d2871482..ac49cd4cd4330 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -15,7 +15,6 @@ cast, overload, ) -import warnings import numpy as np @@ -42,7 +41,6 @@ Timezone, npt, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, @@ -1294,27 +1292,8 @@ def calc_with_mask(carg, mask): return None -def to_time( - arg, - format=None, - infer_time_format: bool = False, - errors: DateTimeErrorChoices = "raise", -): - # GH#34145 - warnings.warn( - "`to_time` has been moved, should be imported from pandas.core.tools.times. 
" - "This alias will be removed in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) - from pandas.core.tools.times import to_time - - return to_time(arg, format, infer_time_format, errors) - - __all__ = [ "DateParseError", "should_cache", "to_datetime", - "to_time", ] diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index d11f4648ec632..570f04fae2c33 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -33,9 +33,9 @@ class TestCategoricalConstructors: - def test_categorical_scalar_deprecated(self): + def test_categorical_disallows_scalar(self): # GH#38433 - with tm.assert_produces_warning(FutureWarning): + with pytest.raises(TypeError, match="Categorical input must be list-like"): Categorical("A", categories=["A", "B"]) def test_categorical_1d_only(self): @@ -220,13 +220,6 @@ def test_constructor(self): assert len(cat.codes) == 1 assert cat.codes[0] == 0 - with tm.assert_produces_warning(FutureWarning): - # GH#38433 - cat = Categorical(1) - assert len(cat.categories) == 1 - assert cat.categories[0] == 1 - assert len(cat.codes) == 1 - assert cat.codes[0] == 0 # two arrays # - when the first is an integer dtype and the second is not # - when the resulting codes are all -1/NaN diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py index 06c00123566ba..e375af797f409 100644 --- a/pandas/tests/indexes/categorical/test_category.py +++ b/pandas/tests/indexes/categorical/test_category.py @@ -38,11 +38,6 @@ def test_can_hold_identifiers(self): key = idx[0] assert idx._can_hold_identifiers_and_holds_name(key) is True - def test_pickle_compat_construction(self): - # Once the deprecation is enforced, we can use the parent class's test - with tm.assert_produces_warning(FutureWarning, match="without passing data"): - self._index_cls() - def 
test_insert(self, simple_index): ci = simple_index diff --git a/pandas/tests/indexes/categorical/test_constructors.py b/pandas/tests/indexes/categorical/test_constructors.py index def865e03ed7c..19e8ec19db641 100644 --- a/pandas/tests/indexes/categorical/test_constructors.py +++ b/pandas/tests/indexes/categorical/test_constructors.py @@ -11,17 +11,12 @@ class TestCategoricalIndexConstructors: - def test_construction_without_data_deprecated(self): - # Once the deprecation is enforced, we can add this case to - # test_construction_disallows_scalar - msg = "without passing data" - with tm.assert_produces_warning(FutureWarning, match=msg): - CategoricalIndex(categories=list("abcd"), ordered=False) - def test_construction_disallows_scalar(self): msg = "must be called with a collection of some kind" with pytest.raises(TypeError, match=msg): CategoricalIndex(data=1, categories=list("abcd"), ordered=False) + with pytest.raises(TypeError, match=msg): + CategoricalIndex(categories=list("abcd"), ordered=False) def test_construction(self): diff --git a/pandas/tests/indexes/datetimes/methods/test_to_frame.py b/pandas/tests/indexes/datetimes/methods/test_to_frame.py index fa5cca1c3e78b..c829109d4e06c 100644 --- a/pandas/tests/indexes/datetimes/methods/test_to_frame.py +++ b/pandas/tests/indexes/datetimes/methods/test_to_frame.py @@ -19,13 +19,10 @@ def test_to_frame_respects_none_name(self): # not changed to 0 # GH-45448 this is first deprecated to only change in the future idx = date_range(start="2019-01-01", end="2019-01-30", freq="D", tz="UTC") - with tm.assert_produces_warning(FutureWarning): - result = idx.to_frame(name=None) - # exp_idx = Index([None], dtype=object) - exp_idx = Index([0]) + result = idx.to_frame(name=None) + exp_idx = Index([None], dtype=object) tm.assert_index_equal(exp_idx, result.columns) - with tm.assert_produces_warning(FutureWarning): - result = idx.rename("foo").to_frame(name=None) - exp_idx = Index(["foo"], dtype=object) + result = 
idx.rename("foo").to_frame(name=None) + exp_idx = Index([None], dtype=object) tm.assert_index_equal(exp_idx, result.columns) diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py index 6868279776a91..6d4e7caacc5e4 100644 --- a/pandas/tests/indexes/test_any_index.py +++ b/pandas/tests/indexes/test_any_index.py @@ -61,10 +61,10 @@ def test_view_preserves_name(index): assert index.view().name == index.name -def test_ravel_deprecation(index): - # GH#19956 ravel returning ndarray is deprecated - with tm.assert_produces_warning(FutureWarning): - index.ravel() +def test_ravel(index): + # GH#19956 ravel returning ndarray is deprecated, in 2.0 returns a view on self + res = index.ravel() + tm.assert_index_equal(res, index) class TestConversion: diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 523decba33b6e..60cf3365fc7de 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -664,23 +664,6 @@ def test_is_numeric(self, index, expected): def test_is_object(self, index, expected): assert index.is_object() is expected - @pytest.mark.parametrize( - "index, expected", - [ - ("string", False), - ("bool-object", False), - ("bool-dtype", False), - ("categorical", False), - ("int", False), - ("datetime", True), - ("float", False), - ], - indirect=["index"], - ) - def test_is_all_dates(self, index, expected): - with tm.assert_produces_warning(FutureWarning): - assert index.is_all_dates is expected - def test_summary(self, index): index._summary() diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 5214e280718f0..4be0aa15523fb 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -171,9 +171,7 @@ def test_attrs(self): def test_inspect_getmembers(self): # GH38782 ser = Series(dtype=object) - # TODO(2.0): Change to None once is_monotonic deprecation - # is enforced - with 
tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + with tm.assert_produces_warning(None, check_stacklevel=False): inspect.getmembers(ser) def test_unknown_attribute(self): diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 1ab04daca60b7..39767e5c8f226 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1965,8 +1965,6 @@ class TestSeriesConstructorIndexCoercion: def test_series_constructor_datetimelike_index_coercion(self): idx = tm.makeDateIndex(10000) ser = Series(np.random.randn(len(idx)), idx.astype(object)) - with tm.assert_produces_warning(FutureWarning): - assert ser.index.is_all_dates # as of 2.0, we no longer silently cast the object-dtype index # to DatetimeIndex GH#39307, GH#23598 assert not isinstance(ser.index, DatetimeIndex) diff --git a/pandas/tests/tools/test_to_time.py b/pandas/tests/tools/test_to_time.py index c80b1e080a1d1..5046fd9d0edc1 100644 --- a/pandas/tests/tools/test_to_time.py +++ b/pandas/tests/tools/test_to_time.py @@ -8,7 +8,6 @@ from pandas import Series import pandas._testing as tm -from pandas.core.tools.datetimes import to_time as to_time_alias from pandas.core.tools.times import to_time # The tests marked with this are locale-dependent. @@ -69,12 +68,3 @@ def test_arraylike(self): res = to_time(np.array(arg)) assert isinstance(res, list) assert res == expected_arr - - -def test_to_time_alias(): - expected = time(14, 15) - - with tm.assert_produces_warning(FutureWarning): - result = to_time_alias(expected) - - assert result == expected
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49457
2022-11-01T22:15:03Z
2022-11-03T16:14:59Z
2022-11-03T16:14:59Z
2022-11-03T16:30:26Z
API: avoid silent consolidation
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 20e99d007c798..8aeb577747354 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -348,6 +348,7 @@ Other API changes - Changed behavior of :class:`Index` constructor with sequence containing at least one ``NaT`` and everything else either ``None`` or ``NaN`` to infer ``datetime64[ns]`` dtype instead of ``object``, matching :class:`Series` behavior (:issue:`49340`) - :func:`read_stata` with parameter ``index_col`` set to ``None`` (the default) will now set the index on the returned :class:`DataFrame` to a :class:`RangeIndex` instead of a :class:`Int64Index` (:issue:`49745`) - Changed behavior of :class:`Index` constructor with an object-dtype ``numpy.ndarray`` containing all-``bool`` values or all-complex values, this will now retain object dtype, consistent with the :class:`Series` behavior (:issue:`49594`) +- :meth:`DataFrame.values`, :meth:`DataFrame.to_numpy`, :meth:`DataFrame.xs`, :meth:`DataFrame.reindex`, :meth:`DataFrame.fillna`, and :meth:`DataFrame.replace` no longer silently consolidate the underlying arrays; do ``df = df.copy()`` to ensure consolidation (:issue:`49356`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7b181a3e8e391..376281717219e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -165,6 +165,7 @@ ) from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, extract_array, sanitize_array, sanitize_masked_array, @@ -960,8 +961,6 @@ def _values( # type: ignore[override] """ Analogue to ._values that may return a 2D ExtensionArray. 
""" - self._consolidate_inplace() - mgr = self._mgr if isinstance(mgr, ArrayManager): @@ -969,11 +968,11 @@ def _values( # type: ignore[override] # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] - return self.values + return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: - return self.values + return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: @@ -1804,7 +1803,6 @@ def to_numpy( array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ - self._consolidate_inplace() if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) @@ -11291,7 +11289,6 @@ def values(self) -> np.ndarray: ['lion', 80.5, 1], ['monkey', nan, None]], dtype=object) """ - self._consolidate_inplace() return self._mgr.as_array() @overload diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fa35060c8aff8..1b17deb7def90 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3777,7 +3777,6 @@ def _take( See the docstring of `take` for full explanation of the parameters. 
""" - self._consolidate_inplace() new_data = self._mgr.take( indices, @@ -3934,8 +3933,6 @@ class animal locomotion else: index = self.index - self._consolidate_inplace() - if isinstance(index, MultiIndex): loc, new_index = index._get_loc_level(key, level=0) if not drop_level: @@ -5190,8 +5187,6 @@ def reindex(self: NDFrameT, *args, **kwargs) -> NDFrameT: f'argument "{list(kwargs.keys())[0]}"' ) - self._consolidate_inplace() - # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if all( @@ -6730,8 +6725,6 @@ def fillna( inplace = validate_bool_kwarg(inplace, "inplace") value, method = validate_fillna_kwargs(value, method) - self._consolidate_inplace() - # set the default here, so functions examining the signaure # can detect if something was set (e.g. in groupby) (GH9221) if axis is None: @@ -7049,8 +7042,6 @@ def replace( if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") - self._consolidate_inplace() - if value is lib.no_default or method is not lib.no_default: # GH#36984 if the user explicitly passes value=None we want to # respect that. 
We have the corner case where the user explicitly diff --git a/pandas/tests/frame/methods/test_values.py b/pandas/tests/frame/methods/test_values.py index f755b0addfd6d..1f134af68be6b 100644 --- a/pandas/tests/frame/methods/test_values.py +++ b/pandas/tests/frame/methods/test_values.py @@ -256,11 +256,7 @@ def test_private_values_dt64tz_multicol(self): df2 = df - df tm.assert_equal(df2._values, tda) - def test_private_values_dt64_multiblock(self, using_array_manager, request): - if using_array_manager: - mark = pytest.mark.xfail(reason="returns ndarray") - request.node.add_marker(mark) - + def test_private_values_dt64_multiblock(self): dta = date_range("2000", periods=8)._data df = DataFrame({"A": dta[:4]}, copy=False) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 57e498defccc1..a7f9b14f44674 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -85,12 +85,6 @@ def test_consolidate_inplace(self, float_frame): for letter in range(ord("A"), ord("Z")): float_frame[chr(letter)] = chr(letter) - def test_values_consolidate(self, float_frame): - float_frame["E"] = 7.0 - assert not float_frame._mgr.is_consolidated() - _ = float_frame.values - assert float_frame._mgr.is_consolidated() - def test_modify_values(self, float_frame): float_frame.values[5] = 5 assert (float_frame.values[5] == 5).all() @@ -99,10 +93,10 @@ def test_modify_values(self, float_frame): float_frame["E"] = 7.0 col = float_frame["E"] float_frame.values[6] = 6 - assert (float_frame.values[6] == 6).all() + # as of 2.0 .values does not consolidate, so subsequent calls to .values + # does not share data + assert not (float_frame.values[6] == 6).all() - # check that item_cache was cleared - assert float_frame["E"] is not col assert (col == 7).all() def test_boolean_set_uncons(self, float_frame):
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49456
2022-11-01T21:59:34Z
2022-11-22T21:22:44Z
2022-11-22T21:22:44Z
2022-11-22T21:28:27Z
DEPR: DatetimeIndex setops with mismatched tzs
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 6d1f2afab3c6d..c5fb8444680db 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -284,6 +284,7 @@ Removal of prior version deprecations/changes - Removed the deprecated method ``mad`` from pandas classes (:issue:`11787`) - Removed the deprecated method ``tshift`` from pandas classes (:issue:`11631`) - Changed behavior of empty data passed into :class:`Series`; the default dtype will be ``object`` instead of ``float64`` (:issue:`29405`) +- Changed the behavior of :meth:`DatetimeIndex.union`, :meth:`DatetimeIndex.intersection`, and :meth:`DatetimeIndex.symmetric_difference` with mismatched timezones to convert to UTC instead of casting to object dtype (:issue:`39328`) - Changed the behavior of :func:`to_datetime` with argument "now" with ``utc=False`` to match ``Timestamp("now")`` (:issue:`18705`) - Changed behavior of :class:`DataFrame` constructor given floating-point ``data`` and an integer ``dtype``, when the data cannot be cast losslessly, the floating point dtype is retained, matching :class:`Series` behavior (:issue:`41170`) - Changed behavior of :class:`DataFrame` constructor when passed a ``dtype`` (other than int) that the data cannot be cast to; it now raises instead of silently ignoring the dtype (:issue:`41733`) diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 58dd207bb4353..c562eaffd241d 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -13,7 +13,6 @@ from pandas.errors import InvalidIndexError from pandas.core.dtypes.cast import find_common_type -from pandas.core.dtypes.common import is_dtype_equal from pandas.core.algorithms import safe_sort from pandas.core.indexes.base import ( @@ -276,7 +275,6 @@ def _find_common_index_dtype(inds): if kind == "special": result = indexes[0] - first = result dtis = [x for x in indexes if isinstance(x, DatetimeIndex)] dti_tzs = [x for x in dtis if x.tz 
is not None] @@ -289,12 +287,6 @@ def _find_common_index_dtype(inds): if len(dtis) == len(indexes): sort = True - if not all(is_dtype_equal(x.dtype, first.dtype) for x in indexes): - # i.e. timezones mismatch - # TODO(2.0): once deprecation is enforced, this union will - # cast to UTC automatically. - indexes = [x.tz_convert("UTC") for x in indexes] - result = indexes[0] elif len(dtis) > 1: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index d8300bb29c274..6418498d5e3bb 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3075,10 +3075,9 @@ def _validate_sort_keyword(self, sort): ) @final - def _deprecate_dti_setop(self, other: Index, setop: str_t) -> None: + def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]: """ - Deprecate setop behavior between timezone-aware DatetimeIndexes with - mismatched timezones. + With mismatched timezones, cast both to UTC. """ # Caller is responsibelf or checking # `not is_dtype_equal(self.dtype, other.dtype)` @@ -3089,14 +3088,10 @@ def _deprecate_dti_setop(self, other: Index, setop: str_t) -> None: and other.tz is not None ): # GH#39328, GH#45357 - warnings.warn( - f"In a future version, the {setop} of DatetimeIndex objects " - "with mismatched timezones will cast both to UTC instead of " - "object dtype. To retain the old behavior, " - f"use `index.astype(object).{setop}(other)`", - FutureWarning, - stacklevel=find_stack_level(), - ) + left = self.tz_convert("UTC") + right = other.tz_convert("UTC") + return left, right + return self, other @final def union(self, other, sort=None): @@ -3196,7 +3191,7 @@ def union(self, other, sort=None): "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." 
) - self._deprecate_dti_setop(other, "union") + self, other = self._dti_setop_align_tzs(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) @@ -3333,7 +3328,7 @@ def intersection(self, other, sort: bool = False): other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): - self._deprecate_dti_setop(other, "intersection") + self, other = self._dti_setop_align_tzs(other, "intersection") if self.equals(other): if self.has_duplicates: @@ -3481,7 +3476,7 @@ def difference(self, other, sort=None): self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) - # Note: we do NOT call _deprecate_dti_setop here, as there + # Note: we do NOT call _dti_setop_align_tzs here, as there # is no requirement that .difference be commutative, so it does # not cast to object. @@ -3565,7 +3560,7 @@ def symmetric_difference(self, other, result_name=None, sort=None): result_name = result_name_update if not is_dtype_equal(self.dtype, other.dtype): - self._deprecate_dti_setop(other, "symmetric_difference") + self, other = self._dti_setop_align_tzs(other, "symmetric_difference") if not self._should_compare(other): return self.union(other, sort=sort).rename(result_name) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 667deec23757f..73e25f9fe2f06 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -429,18 +429,6 @@ def _can_range_setop(self, other) -> bool: return False return super()._can_range_setop(other) - def _maybe_utc_convert(self, other: Index) -> tuple[DatetimeIndex, Index]: - this = self - - if isinstance(other, DatetimeIndex): - if (self.tz is None) ^ (other.tz is None): - raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") - - if not timezones.tz_compare(self.tz, other.tz): - this = self.tz_convert("UTC") - other = other.tz_convert("UTC") - return this, other - # 
-------------------------------------------------------------------- def _get_time_micros(self) -> npt.NDArray[np.int64]: diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index a07f21f785828..0bc2862e55021 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -1155,19 +1155,21 @@ def test_dti_convert_tz_aware_datetime_datetime(self, tz): @pytest.mark.parametrize("setop", ["union", "intersection", "symmetric_difference"]) def test_dti_setop_aware(self, setop): # non-overlapping + # GH#39328 as of 2.0 we cast these to UTC instead of object rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", tz="US/Central") rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H", tz="US/Eastern") - with tm.assert_produces_warning(FutureWarning): - # # GH#39328 will cast both to UTC - result = getattr(rng, setop)(rng2) + result = getattr(rng, setop)(rng2) - expected = getattr(rng.astype("O"), setop)(rng2.astype("O")) + left = rng.tz_convert("UTC") + right = rng2.tz_convert("UTC") + expected = getattr(left, setop)(right) tm.assert_index_equal(result, expected) + assert result.tz == left.tz if len(result): - assert result[0].tz.zone == "US/Central" - assert result[-1].tz.zone == "US/Eastern" + assert result[0].tz.zone == "UTC" + assert result[-1].tz.zone == "UTC" def test_dti_union_mixed(self): # GH 21671
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49455
2022-11-01T21:41:21Z
2022-11-02T18:25:39Z
2022-11-02T18:25:39Z
2022-11-02T18:25:45Z
DEPR: __setitem__ on dt64tz with mixed timezones
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 5575ae8db2776..97556a06c0ecb 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -305,12 +305,14 @@ Removal of prior version deprecations/changes - Changed the behavior of :class:`Series` constructor, it will no longer infer a datetime64 or timedelta64 dtype from string entries (:issue:`41731`) - Changed behavior of :class:`Timestamp` constructor with a ``np.datetime64`` object and a ``tz`` passed to interpret the input as a wall-time as opposed to a UTC time (:issue:`42288`) - Changed behavior of :class:`Index` constructor when passed a ``SparseArray`` or ``SparseDtype`` to retain that dtype instead of casting to ``numpy.ndarray`` (:issue:`43930`) +- Changed behavior of setitem-like operations (``__setitem__``, ``fillna``, ``where``, ``mask``, ``replace``, ``insert``, fill_value for ``shift``) on an object with :class:`DatetimeTZDtype` when using a value with a non-matching timezone, the value will be cast to the object's timezone instead of casting both to object-dtype (:issue:`44243`) - Changed behavior of :class:`Index`, :class:`Series`, :class:`DataFrame` constructors with floating-dtype data and a :class:`DatetimeTZDtype`, the data are now interpreted as UTC-times instead of wall-times, consistent with how integer-dtype data are treated (:issue:`45573`) - Removed the deprecated ``base`` and ``loffset`` arguments from :meth:`pandas.DataFrame.resample`, :meth:`pandas.Series.resample` and :class:`pandas.Grouper`. 
Use ``offset`` or ``origin`` instead (:issue:`31809`) - Changed behavior of :meth:`DataFrame.any` and :meth:`DataFrame.all` with ``bool_only=True``; object-dtype columns with all-bool values will no longer be included, manually cast to ``bool`` dtype first (:issue:`46188`) - Changed behavior of comparison of a :class:`Timestamp` with a ``datetime.date`` object; these now compare as un-equal and raise on inequality comparisons, matching the ``datetime.datetime`` behavior (:issue:`36131`) - Enforced deprecation of silently dropping columns that raised a ``TypeError`` in :class:`Series.transform` and :class:`DataFrame.transform` when used with a list or dictionary (:issue:`43740`) - Change behavior of :meth:`DataFrame.apply` with list-like so that any partial failure will raise an error (:issue:`43740`) +- .. --------------------------------------------------------------------------- .. _whatsnew_200.performance: @@ -371,7 +373,7 @@ Timedelta Timezones ^^^^^^^^^ -- +- Bug in :meth:`Series.astype` and :meth:`DataFrame.astype` with object-dtype containing multiple timezone-aware ``datetime`` objects with heterogeneous timezones to a :class:`DatetimeTZDtype` incorrectly raising (:issue:`32581`) - Numeric diff --git a/pandas/_libs/tslib.pyi b/pandas/_libs/tslib.pyi index 8fec9ecf27f30..ac8d5bac7c6e7 100644 --- a/pandas/_libs/tslib.pyi +++ b/pandas/_libs/tslib.pyi @@ -28,3 +28,7 @@ def array_to_datetime( ) -> tuple[np.ndarray, tzinfo | None]: ... # returned ndarray may be object dtype or datetime64[ns] + +def array_to_datetime_with_tz( + values: npt.NDArray[np.object_], tz: tzinfo +) -> npt.NDArray[np.int64]: ... 
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index d7c0c91332e02..bf912005ae57e 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -6,6 +6,7 @@ from cpython.datetime cimport ( import_datetime, tzinfo, ) +from cpython.object cimport PyObject # import datetime C API import_datetime() @@ -862,3 +863,50 @@ cdef inline bint _parse_today_now(str val, int64_t* iresult, bint utc): iresult[0] = Timestamp.today().value return True return False + + +def array_to_datetime_with_tz(ndarray values, tzinfo tz): + """ + Vectorized analogue to pd.Timestamp(value, tz=tz) + + values has object-dtype, unrestricted ndim. + + Major differences between this and array_to_datetime with utc=True + - np.datetime64 objects are treated as _wall_ times. + - tznaive datetimes are treated as _wall_ times. + """ + cdef: + ndarray result = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_INT64, 0) + cnp.broadcast mi = cnp.PyArray_MultiIterNew2(result, values) + Py_ssize_t i, n = values.size + object item + int64_t ival + datetime ts + + for i in range(n): + # Analogous to `item = values[i]` + item = <object>(<PyObject**>cnp.PyArray_MultiIter_DATA(mi, 1))[0] + + if checknull_with_nat_and_na(item): + # this catches pd.NA which would raise in the Timestamp constructor + ival = NPY_NAT + + else: + ts = Timestamp(item) + if ts is NaT: + ival = NPY_NAT + else: + if ts.tz is not None: + ts = ts.tz_convert(tz) + else: + # datetime64, tznaive pydatetime, int, float + ts = ts.tz_localize(tz) + ts = ts._as_unit("ns") + ival = ts.value + + # Analogous to: result[i] = ival + (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = ival + + cnp.PyArray_MultiIter_NEXT(mi) + + return result diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 17ea71c8d29a4..b4198575c3f06 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -217,7 +217,7 @@ def _scalar_from_string(self, value: str) -> DTScalarOrNaT: raise 
AbstractMethodError(self) def _unbox_scalar( - self, value: DTScalarOrNaT, setitem: bool = False + self, value: DTScalarOrNaT ) -> np.int64 | np.datetime64 | np.timedelta64: """ Unbox the integer value of a scalar `value`. @@ -226,8 +226,6 @@ def _unbox_scalar( ---------- value : Period, Timestamp, Timedelta, or NaT Depending on subclass. - setitem : bool, default False - Whether to check compatibility with setitem strictness. Returns ------- @@ -240,9 +238,7 @@ def _unbox_scalar( """ raise AbstractMethodError(self) - def _check_compatible_with( - self, other: DTScalarOrNaT, setitem: bool = False - ) -> None: + def _check_compatible_with(self, other: DTScalarOrNaT) -> None: """ Verify that `self` and `other` are compatible. @@ -255,9 +251,6 @@ def _check_compatible_with( Parameters ---------- other - setitem : bool, default False - For __setitem__ we may have stricter compatibility restrictions than - for comparisons. Raises ------ @@ -663,7 +656,7 @@ def _validate_scalar( # this option exists to prevent a performance hit in # TimedeltaIndex.get_loc return value - return self._unbox_scalar(value, setitem=setitem) + return self._unbox_scalar(value) def _validation_error_message(self, value, allow_listlike: bool = False) -> str: """ @@ -757,19 +750,18 @@ def _validate_setitem_value(self, value): else: return self._validate_scalar(value, allow_listlike=True) - return self._unbox(value, setitem=True) + return self._unbox(value) - def _unbox( - self, other, setitem: bool = False - ) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray: + @final + def _unbox(self, other) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray: """ Unbox either a scalar with _unbox_scalar or an instance of our own type. 
""" if lib.is_scalar(other): - other = self._unbox_scalar(other, setitem=setitem) + other = self._unbox_scalar(other) else: # same type as self - self._check_compatible_with(other, setitem=setitem) + self._check_compatible_with(other) other = other._ndarray return other diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index babbf90d92175..5fadb59056df6 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -484,36 +484,19 @@ def _generate_range( # type: ignore[override] # ----------------------------------------------------------------- # DatetimeLike Interface - def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64: + def _unbox_scalar(self, value) -> np.datetime64: if not isinstance(value, self._scalar_type) and value is not NaT: raise ValueError("'value' should be a Timestamp.") - self._check_compatible_with(value, setitem=setitem) + self._check_compatible_with(value) return value.asm8 def _scalar_from_string(self, value) -> Timestamp | NaTType: return Timestamp(value, tz=self.tz) - def _check_compatible_with(self, other, setitem: bool = False): + def _check_compatible_with(self, other) -> None: if other is NaT: return self._assert_tzawareness_compat(other) - if setitem: - # Stricter check for setitem vs comparison methods - if self.tz is not None and not timezones.tz_compare(self.tz, other.tz): - # TODO(2.0): remove this check. GH#37605 - warnings.warn( - "Setitem-like behavior with mismatched timezones is deprecated " - "and will change in a future version. Instead of raising " - "(or for Index, Series, and DataFrame methods, coercing to " - "object dtype), the value being set (or passed as a " - "fill_value, or inserted) will be cast to the existing " - "DatetimeArray/DatetimeIndex/Series/DataFrame column's " - "timezone. 
To retain the old behavior, explicitly cast to " - "object dtype before the operation.", - FutureWarning, - stacklevel=find_stack_level(), - ) - raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'") # ----------------------------------------------------------------- # Descriptive Properties @@ -2030,6 +2013,11 @@ def _sequence_to_dt64ns( copy = False if lib.infer_dtype(data, skipna=False) == "integer": data = data.astype(np.int64) + elif tz is not None and ambiguous == "raise": + # TODO: yearfirst/dayfirst/etc? + obj_data = np.asarray(data, dtype=object) + i8data = tslib.array_to_datetime_with_tz(obj_data, tz) + return i8data.view(DT64NS_DTYPE), tz, None else: # data comes back here as either i8 to denote UTC timestamps # or M8[ns] to denote wall times diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 5e1b0c4b18718..3b21cc1ecff48 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -328,13 +328,12 @@ def _generate_range(cls, start, end, periods, freq, fields): def _unbox_scalar( # type: ignore[override] self, value: Period | NaTType, - setitem: bool = False, ) -> np.int64: if value is NaT: # error: Item "Period" of "Union[Period, NaTType]" has no attribute "value" return np.int64(value.value) # type: ignore[union-attr] elif isinstance(value, self._scalar_type): - self._check_compatible_with(value, setitem=setitem) + self._check_compatible_with(value) return np.int64(value.ordinal) else: raise ValueError(f"'value' should be a Period. 
Got '{value}' instead.") @@ -342,7 +341,7 @@ def _unbox_scalar( # type: ignore[override] def _scalar_from_string(self, value: str) -> Period: return Period(value, freq=self.freq) - def _check_compatible_with(self, other, setitem: bool = False) -> None: + def _check_compatible_with(self, other) -> None: if other is NaT: return self._require_matching_freq(other) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 5bb4ae94d1849..fbc662ffb8dc9 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -292,10 +292,10 @@ def _generate_range(cls, start, end, periods, freq, closed=None): # ---------------------------------------------------------------- # DatetimeLike Interface - def _unbox_scalar(self, value, setitem: bool = False) -> np.timedelta64: + def _unbox_scalar(self, value) -> np.timedelta64: if not isinstance(value, self._scalar_type) and value is not NaT: raise ValueError("'value' should be a Timedelta.") - self._check_compatible_with(value, setitem=setitem) + self._check_compatible_with(value) if value is NaT: return np.timedelta64(value.value, "ns") else: @@ -304,7 +304,7 @@ def _unbox_scalar(self, value, setitem: bool = False) -> np.timedelta64: def _scalar_from_string(self, value) -> Timedelta | NaTType: return Timedelta(value) - def _check_compatible_with(self, other, setitem: bool = False) -> None: + def _check_compatible_with(self, other) -> None: # we don't have anything to validate. 
pass diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 72b2cd15d3222..3f310d0efa2ca 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -292,19 +292,7 @@ def test_searchsorted(self): assert result == 10 @pytest.mark.parametrize("box", [None, "index", "series"]) - def test_searchsorted_castable_strings(self, arr1d, box, request, string_storage): - if isinstance(arr1d, DatetimeArray): - tz = arr1d.tz - ts1, ts2 = arr1d[1:3] - if tz is not None and ts1.tz.tzname(ts1) != ts2.tz.tzname(ts2): - # If we have e.g. tzutc(), when we cast to string and parse - # back we get pytz.UTC, and then consider them different timezones - # so incorrectly raise. - mark = pytest.mark.xfail( - raises=TypeError, reason="timezone comparisons inconsistent" - ) - request.node.add_marker(mark) - + def test_searchsorted_castable_strings(self, arr1d, box, string_storage): arr = arr1d if box is None: pass @@ -461,19 +449,8 @@ def test_setitem_object_dtype(self, box, arr1d): tm.assert_equal(arr1d, expected) - def test_setitem_strs(self, arr1d, request): + def test_setitem_strs(self, arr1d): # Check that we parse strs in both scalar and listlike - if isinstance(arr1d, DatetimeArray): - tz = arr1d.tz - ts1, ts2 = arr1d[-2:] - if tz is not None and ts1.tz.tzname(ts1) != ts2.tz.tzname(ts2): - # If we have e.g. tzutc(), when we cast to string and parse - # back we get pytz.UTC, and then consider them different timezones - # so incorrectly raise. - mark = pytest.mark.xfail( - raises=TypeError, reason="timezone comparisons inconsistent" - ) - request.node.add_marker(mark) # Setting list-like of strs expected = arr1d.copy() @@ -852,18 +829,14 @@ def test_take_fill_valid(self, arr1d, fixed_now_ts): # GH#37356 # Assuming here that arr1d fixture does not include Australia/Melbourne value = fixed_now_ts.tz_localize("Australia/Melbourne") - msg = "Timezones don't match. 
.* != 'Australia/Melbourne'" - with pytest.raises(ValueError, match=msg): - # require tz match, not just tzawareness match - with tm.assert_produces_warning( - FutureWarning, match="mismatched timezone" - ): - result = arr.take([-1, 1], allow_fill=True, fill_value=value) - - # once deprecation is enforced - # expected = arr.take([-1, 1], allow_fill=True, - # fill_value=value.tz_convert(arr.dtype.tz)) - # tm.assert_equal(result, expected) + result = arr.take([-1, 1], allow_fill=True, fill_value=value) + + expected = arr.take( + [-1, 1], + allow_fill=True, + fill_value=value.tz_convert(arr.dtype.tz), + ) + tm.assert_equal(result, expected) def test_concat_same_type_invalid(self, arr1d): # different timezones diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index babab81dfbe57..37a9c19627ada 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -429,19 +429,16 @@ def test_setitem_str_impute_tz(self, tz_naive_fixture): tm.assert_equal(arr, expected) def test_setitem_different_tz_raises(self): + # pre-2.0 we required exact tz match, in 2.0 we require only + # tzawareness-match data = np.array([1, 2, 3], dtype="M8[ns]") arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central")) with pytest.raises(TypeError, match="Cannot compare tz-naive and tz-aware"): arr[0] = pd.Timestamp("2000") ts = pd.Timestamp("2000", tz="US/Eastern") - with pytest.raises(ValueError, match="US/Central"): - with tm.assert_produces_warning( - FutureWarning, match="mismatched timezones" - ): - arr[0] = ts - # once deprecation is enforced - # assert arr[0] == ts.tz_convert("US/Central") + arr[0] = ts + assert arr[0] == ts.tz_convert("US/Central") def test_setitem_clears_freq(self): a = DatetimeArray(pd.date_range("2000", periods=2, freq="D", tz="US/Central")) @@ -688,23 +685,16 @@ def test_shift_value_tzawareness_mismatch(self): dta.shift(1, fill_value=invalid) def 
test_shift_requires_tzmatch(self): - # since filling is setitem-like, we require a matching timezone, - # not just matching tzawawreness + # pre-2.0 we required exact tz match, in 2.0 we require just + # matching tzawareness dti = pd.date_range("2016-01-01", periods=3, tz="UTC") dta = dti._data fill_value = pd.Timestamp("2020-10-18 18:44", tz="US/Pacific") - msg = "Timezones don't match. 'UTC' != 'US/Pacific'" - with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning( - FutureWarning, match="mismatched timezones" - ): - dta.shift(1, fill_value=fill_value) - - # once deprecation is enforced - # expected = dta.shift(1, fill_value=fill_value.tz_convert("UTC")) - # tm.assert_equal(result, expected) + result = dta.shift(1, fill_value=fill_value) + expected = dta.shift(1, fill_value=fill_value.tz_convert("UTC")) + tm.assert_equal(result, expected) def test_tz_localize_t2d(self): dti = pd.date_range("1994-05-12", periods=12, tz="US/Pacific") diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 9eaba56a23e0f..646d275bbfc40 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1162,20 +1162,15 @@ def test_replace_datetimetz(self): result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Eastern")) tm.assert_frame_equal(result, expected) - # coerce to object + # pre-2.0 this would coerce to object with mismatched tzs result = df.copy() result.iloc[1, 0] = np.nan - with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"): - result = result.replace( - {"A": pd.NaT}, Timestamp("20130104", tz="US/Pacific") - ) + result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Pacific")) expected = DataFrame( { "A": [ Timestamp("20130101", tz="US/Eastern"), - Timestamp("20130104", tz="US/Pacific"), - # once deprecation is enforced - # Timestamp("20130104", tz="US/Pacific").tz_convert("US/Eastern"), + 
Timestamp("20130104", tz="US/Pacific").tz_convert("US/Eastern"), Timestamp("20130103", tz="US/Eastern"), ], "B": [0, np.nan, 2], diff --git a/pandas/tests/indexes/datetimes/methods/test_insert.py b/pandas/tests/indexes/datetimes/methods/test_insert.py index 592f4240ee750..2478a3ba799ad 100644 --- a/pandas/tests/indexes/datetimes/methods/test_insert.py +++ b/pandas/tests/indexes/datetimes/methods/test_insert.py @@ -193,36 +193,26 @@ def test_insert_mismatched_tzawareness(self): # TODO: also changes DataFrame.__setitem__ with expansion def test_insert_mismatched_tz(self): # see GH#7299 + # pre-2.0 with mismatched tzs we would cast to object idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx") # mismatched tz -> cast to object (could reasonably cast to same tz or UTC) item = Timestamp("2000-01-04", tz="US/Eastern") - with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"): - result = idx.insert(3, item) + result = idx.insert(3, item) expected = Index( - list(idx[:3]) + [item] + list(idx[3:]), - dtype=object, - # once deprecation is enforced - # list(idx[:3]) + [item.tz_convert(idx.tz)] + list(idx[3:]), + list(idx[:3]) + [item.tz_convert(idx.tz)] + list(idx[3:]), name="idx", ) - # once deprecation is enforced - # assert expected.dtype == idx.dtype + assert expected.dtype == idx.dtype tm.assert_index_equal(result, expected) - # mismatched tz -> cast to object (could reasonably cast to same tz) item = datetime(2000, 1, 4, tzinfo=pytz.timezone("US/Eastern")) - with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"): - result = idx.insert(3, item) + result = idx.insert(3, item) expected = Index( - list(idx[:3]) + [item] + list(idx[3:]), - dtype=object, - # once deprecation is enforced - # list(idx[:3]) + [item.astimezone(idx.tzinfo)] + list(idx[3:]), + list(idx[:3]) + [item.astimezone(idx.tzinfo)] + list(idx[3:]), name="idx", ) - # once deprecation is enforced - # assert expected.dtype == idx.dtype + assert 
expected.dtype == idx.dtype tm.assert_index_equal(result, expected) @pytest.mark.parametrize( diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index 0061cfd2b903f..4aaa2b694102d 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -466,25 +466,57 @@ def test_construction_dti_with_mixed_timezones(self): name="idx", ) - with pytest.raises(ValueError, match=msg): - DatetimeIndex( - [ - Timestamp("2011-01-01 10:00"), - Timestamp("2011-01-02 10:00", tz="US/Eastern"), - ], - tz="Asia/Tokyo", - name="idx", - ) + # pre-2.0 this raised bc of awareness mismatch. in 2.0 with a tz# + # specified we behave as if this was called pointwise, so + # the naive Timestamp is treated as a wall time. + dti = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + tz="Asia/Tokyo", + name="idx", + ) + expected = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + Timestamp("2011-01-02 10:00", tz="US/Eastern").tz_convert("Asia/Tokyo"), + ], + tz="Asia/Tokyo", + name="idx", + ) + tm.assert_index_equal(dti, expected) - with pytest.raises(ValueError, match=msg): - DatetimeIndex( - [ - Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), - Timestamp("2011-01-02 10:00", tz="US/Eastern"), - ], - tz="US/Eastern", - name="idx", - ) + # pre-2.0 mixed-tz scalars raised even if a tz/dtype was specified. 
+ # as of 2.0 we successfully return the requested tz/dtype + dti = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + tz="US/Eastern", + name="idx", + ) + expected = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo").tz_convert("US/Eastern"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + tz="US/Eastern", + name="idx", + ) + tm.assert_index_equal(dti, expected) + + # same thing but pass dtype instead of tz + dti = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + dtype="M8[ns, US/Eastern]", + name="idx", + ) + tm.assert_index_equal(dti, expected) def test_construction_base_constructor(self): arr = [Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-03")] diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 26424904482d1..ee2c06150bf53 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -287,14 +287,11 @@ def test_insert_index_datetimes(self, fill_val, exp_dtype, insert_value): assert expected.dtype == object tm.assert_index_equal(result, expected) - # mismatched tz --> cast to object (could reasonably cast to common tz) ts = pd.Timestamp("2012-01-01", tz="Asia/Tokyo") - with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"): - result = obj.insert(1, ts) + result = obj.insert(1, ts) # once deprecation is enforced: - # expected = obj.insert(1, ts.tz_convert(obj.dtype.tz)) - # assert expected.dtype == obj.dtype - expected = obj.astype(object).insert(1, ts) + expected = obj.insert(1, ts.tz_convert(obj.dtype.tz)) + assert expected.dtype == obj.dtype tm.assert_index_equal(result, expected) else: @@ -652,7 +649,8 @@ def test_fillna_datetime(self, index_or_series, fill_val, fill_dtype): [ (pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"), 
(pd.Timestamp("2012-01-01"), object), - (pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), object), + # pre-2.0 with a mismatched tz we would get object result + (pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), "datetime64[ns, US/Eastern]"), (1, object), ("x", object), ], @@ -671,22 +669,19 @@ def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype): ) assert obj.dtype == "datetime64[ns, US/Eastern]" + if getattr(fill_val, "tz", None) is None: + fv = fill_val + else: + fv = fill_val.tz_convert(tz) exp = klass( [ pd.Timestamp("2011-01-01", tz=tz), - fill_val, - # Once deprecation is enforced, this becomes: - # fill_val.tz_convert(tz) if getattr(fill_val, "tz", None) - # is not None else fill_val, + fv, pd.Timestamp("2011-01-03", tz=tz), pd.Timestamp("2011-01-04", tz=tz), ] ) - warn = None - if getattr(fill_val, "tz", None) is not None and fill_val.tz != obj[0].tz: - warn = FutureWarning - with tm.assert_produces_warning(warn, match="mismatched timezone"): - self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) @pytest.mark.parametrize( "fill_val", @@ -914,23 +909,16 @@ def test_replace_series_datetime_datetime(self, how, to_key, from_key, replacer) obj = pd.Series(self.rep[from_key], index=index, name="yyy") assert obj.dtype == from_key - warn = None - rep_ser = pd.Series(replacer) - if ( - isinstance(obj.dtype, pd.DatetimeTZDtype) - and isinstance(rep_ser.dtype, pd.DatetimeTZDtype) - and obj.dtype != rep_ser.dtype - ): - # mismatched tz DatetimeArray behavior will change to cast - # for setitem-like methods with mismatched tzs GH#44940 - warn = FutureWarning - - msg = "explicitly cast to object" - with tm.assert_produces_warning(warn, match=msg): - result = obj.replace(replacer) + result = obj.replace(replacer) exp = pd.Series(self.rep[to_key], index=index, name="yyy") - assert exp.dtype == to_key + if isinstance(obj.dtype, pd.DatetimeTZDtype) and isinstance( + exp.dtype, 
pd.DatetimeTZDtype + ): + # with mismatched tzs, we retain the original dtype as of 2.0 + exp = exp.astype(obj.dtype) + else: + assert exp.dtype == to_key tm.assert_series_equal(result, exp) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index d462ef534e02f..43abacbd6073c 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -2065,13 +2065,12 @@ def test_setitem_with_expansion(self): df.time = df.set_index("time").index.tz_localize("UTC") v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific") - # trying to set a single element on a part of a different timezone - # this converts to object + # pre-2.0 trying to set a single element on a part of a different + # timezone converted to object; in 2.0 it retains dtype df2 = df.copy() - with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"): - df2.loc[df2.new_col == "new", "time"] = v + df2.loc[df2.new_col == "new", "time"] = v - expected = Series([v[0], df.loc[1, "time"]], name="time") + expected = Series([v[0].tz_convert("UTC"), df.loc[1, "time"]], name="time") tm.assert_series_equal(df2.time, expected) v = df.loc[df.new_col == "new", "time"] + Timedelta("1s") diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index e07da3fcdb53c..74d05b7e43b2f 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -1035,28 +1035,18 @@ def key(self): return 0 @pytest.fixture - def expected(self): + def expected(self, obj, val): + # pre-2.0 this would cast to object, in 2.0 we cast the val to + # the target tz expected = Series( [ - Timestamp("2000-01-01 00:00:00-05:00", tz="US/Eastern"), + val.tz_convert("US/Central"), Timestamp("2000-01-02 00:00:00-06:00", tz="US/Central"), ], - dtype=object, + dtype=obj.dtype, ) return expected - @pytest.fixture(autouse=True) - def assert_warns(self, request): - # check that we 
issue a FutureWarning about timezone-matching - if request.function.__name__ == "test_slice_key": - key = request.getfixturevalue("key") - if not isinstance(key, slice): - # The test is a no-op, so no warning will be issued - yield - return - with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"): - yield - @pytest.mark.parametrize( "obj,expected", @@ -1341,7 +1331,8 @@ def obj(self): "val,exp_dtype", [ (Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"), - (Timestamp("2012-01-01", tz="US/Pacific"), object), + # pre-2.0, a mis-matched tz would end up casting to object + (Timestamp("2012-01-01", tz="US/Pacific"), "datetime64[ns, US/Eastern]"), (Timestamp("2012-01-01"), object), (1, object), ], @@ -1353,24 +1344,6 @@ def obj(self): tz = "US/Eastern" return Series(date_range("2011-01-01", freq="D", periods=4, tz=tz)) - @pytest.fixture(autouse=True) - def assert_warns(self, request): - # check that we issue a FutureWarning about timezone-matching - if request.function.__name__ == "test_slice_key": - key = request.getfixturevalue("key") - if not isinstance(key, slice): - # The test is a no-op, so no warning will be issued - yield - return - - exp_dtype = request.getfixturevalue("exp_dtype") - val = request.getfixturevalue("val") - if exp_dtype == object and isinstance(val, Timestamp) and val.tz is not None: - with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"): - yield - else: - yield - @pytest.mark.parametrize( "val,exp_dtype", diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py index 7ea287c3510fe..768cc50857e50 100644 --- a/pandas/tests/series/methods/test_astype.py +++ b/pandas/tests/series/methods/test_astype.py @@ -97,6 +97,20 @@ def test_astype_dict_like(self, dtype_class): class TestAstype: + def test_astype_mixed_object_to_dt64tz(self): + # pre-2.0 this raised ValueError bc of tz mismatch + # xref GH#32581 + ts = Timestamp("2016-01-04 05:06:07", 
tz="US/Pacific") + ts2 = ts.tz_convert("Asia/Tokyo") + + ser = Series([ts, ts2], dtype=object) + res = ser.astype("datetime64[ns, Europe/Brussels]") + expected = Series( + [ts.tz_convert("Europe/Brussels"), ts2.tz_convert("Europe/Brussels")], + dtype="datetime64[ns, Europe/Brussels]", + ) + tm.assert_series_equal(res, expected) + @pytest.mark.parametrize("dtype", np.typecodes["All"]) def test_astype_empty_constructor_equality(self, dtype): # see GH#15524 diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py index 18a4d8355c764..caa14a440d04c 100644 --- a/pandas/tests/series/methods/test_fillna.py +++ b/pandas/tests/series/methods/test_fillna.py @@ -559,14 +559,15 @@ def test_datetime64_tz_fillna(self, tz): tm.assert_series_equal(expected, result) tm.assert_series_equal(isna(ser), null_loc) - with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"): - result = ser.fillna(Timestamp("20130101", tz="US/Pacific")) + # pre-2.0 fillna with mixed tzs would cast to object, in 2.0 + # it retains dtype. 
+ result = ser.fillna(Timestamp("20130101", tz="US/Pacific")) expected = Series( [ Timestamp("2011-01-01 10:00", tz=tz), - Timestamp("2013-01-01", tz="US/Pacific"), + Timestamp("2013-01-01", tz="US/Pacific").tz_convert(tz), Timestamp("2011-01-03 10:00", tz=tz), - Timestamp("2013-01-01", tz="US/Pacific"), + Timestamp("2013-01-01", tz="US/Pacific").tz_convert(tz), ] ) tm.assert_series_equal(expected, result) @@ -817,18 +818,15 @@ def test_fillna_datetime64_with_timezone_tzinfo(self): result = ser.fillna(datetime(2020, 1, 2, tzinfo=timezone.utc)) tm.assert_series_equal(result, expected) - # but we dont (yet) consider distinct tzinfos for non-UTC tz equivalent + # pre-2.0 we cast to object with mixed tzs, in 2.0 we retain dtype ts = Timestamp("2000-01-01", tz="US/Pacific") ser2 = Series(ser._values.tz_convert("dateutil/US/Pacific")) assert ser2.dtype.kind == "M" - with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"): - result = ser2.fillna(ts) - expected = Series([ser[0], ts, ser[2]], dtype=object) - # TODO(2.0): once deprecation is enforced - # expected = Series( - # [ser2[0], ts.tz_convert(ser2.dtype.tz), ser2[2]], - # dtype=ser2.dtype, - # ) + result = ser2.fillna(ts) + expected = Series( + [ser2[0], ts.tz_convert(ser2.dtype.tz), ser2[2]], + dtype=ser2.dtype, + ) tm.assert_series_equal(result, expected) def test_fillna_pos_args_deprecation(self):
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49454
2022-11-01T21:00:53Z
2022-11-03T16:49:54Z
2022-11-03T16:49:54Z
2022-11-03T17:03:54Z
Backport PR #49377 on branch 1.5.x (BUG: Fix passing Colormap instance to plot methods with mpl >= 3.6)
diff --git a/doc/source/whatsnew/v1.5.2.rst b/doc/source/whatsnew/v1.5.2.rst index ff617e8bc5eef..e65be3bcecd76 100644 --- a/doc/source/whatsnew/v1.5.2.rst +++ b/doc/source/whatsnew/v1.5.2.rst @@ -14,6 +14,8 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :meth:`Series.replace` raising ``RecursionError`` with numeric dtype and when specifying ``value=None`` (:issue:`45725`) +- Fixed regression in :meth:`DataFrame.plot` preventing :class:`~matplotlib.colors.Colormap` instance + from being passed using the ``colormap`` argument if Matplotlib 3.6+ is used (:issue:`49374`) - .. --------------------------------------------------------------------------- diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 9c3e4f0bb02fb..d1baaebf6c204 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -3927,7 +3927,15 @@ def _background_gradient( rng = smax - smin # extend lower / upper bounds, compresses color range norm = mpl.colors.Normalize(smin - (rng * low), smax + (rng * high)) - rgbas = plt.cm.get_cmap(cmap)(norm(gmap)) + from pandas.plotting._matplotlib.compat import mpl_ge_3_6_0 + + if mpl_ge_3_6_0(): + if cmap is None: + rgbas = mpl.colormaps[mpl.rcParams["image.cmap"]](norm(gmap)) + else: + rgbas = mpl.colormaps.get_cmap(cmap)(norm(gmap)) + else: + rgbas = plt.cm.get_cmap(cmap)(norm(gmap)) def relative_luminance(rgba) -> float: """ diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 1302413916d58..af91a8ab83e12 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1222,7 +1222,7 @@ def _make_plot(self): if self.colormap is not None: if mpl_ge_3_6_0(): - cmap = mpl.colormaps[self.colormap] + cmap = mpl.colormaps.get_cmap(self.colormap) else: cmap = self.plt.cm.get_cmap(self.colormap) else: @@ -1302,7 +1302,7 @@ def _make_plot(self): # pandas uses colormap, matplotlib uses cmap. 
cmap = self.colormap or "BuGn" if mpl_ge_3_6_0(): - cmap = mpl.colormaps[cmap] + cmap = mpl.colormaps.get_cmap(cmap) else: cmap = self.plt.cm.get_cmap(cmap) cb = self.kwds.pop("colorbar", True) diff --git a/pandas/tests/io/formats/style/test_matplotlib.py b/pandas/tests/io/formats/style/test_matplotlib.py index 8d9f075d8674d..52fd5355e3302 100644 --- a/pandas/tests/io/formats/style/test_matplotlib.py +++ b/pandas/tests/io/formats/style/test_matplotlib.py @@ -284,3 +284,17 @@ def test_bar_color_raises(df): msg = "`color` and `cmap` cannot both be given" with pytest.raises(ValueError, match=msg): df.style.bar(color="something", cmap="something else").to_html() + + +@pytest.mark.parametrize( + "plot_method", + ["scatter", "hexbin"], +) +def test_pass_colormap_instance(df, plot_method): + # https://github.com/pandas-dev/pandas/issues/49374 + cmap = mpl.colors.ListedColormap([[1, 1, 1], [0, 0, 0]]) + df["c"] = df.A + df.B + kwargs = dict(x="A", y="B", c="c", colormap=cmap) + if plot_method == "hexbin": + kwargs["C"] = kwargs.pop("c") + getattr(df.plot, plot_method)(**kwargs)
Backport of #49377 to 1.5.x - [x] closes #49374 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49453
2022-11-01T20:59:07Z
2022-11-02T08:46:03Z
2022-11-02T08:46:03Z
2022-11-02T14:49:39Z
Backport PR #48996 on branch 1.5.x (BUG: CoW - correctly track references for chained operations)
diff --git a/doc/source/whatsnew/v1.5.2.rst b/doc/source/whatsnew/v1.5.2.rst index 4f6274b9084da..ff617e8bc5eef 100644 --- a/doc/source/whatsnew/v1.5.2.rst +++ b/doc/source/whatsnew/v1.5.2.rst @@ -21,7 +21,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- +- Bug in the Copy-on-Write implementation losing track of views in certain chained indexing cases (:issue:`48996`) - .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 94ae4a021da4d..ded161c70f121 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -676,8 +676,9 @@ cdef class BlockManager: public bint _known_consolidated, _is_consolidated public ndarray _blknos, _blklocs public list refs + public object parent - def __cinit__(self, blocks=None, axes=None, refs=None, verify_integrity=True): + def __cinit__(self, blocks=None, axes=None, refs=None, parent=None, verify_integrity=True): # None as defaults for unpickling GH#42345 if blocks is None: # This adds 1-2 microseconds to DataFrame(np.array([])) @@ -690,6 +691,7 @@ cdef class BlockManager: self.blocks = blocks self.axes = axes.copy() # copy to make sure we are not remotely-mutable self.refs = refs + self.parent = parent # Populate known_consolidate, blknos, and blklocs lazily self._known_consolidated = False @@ -805,7 +807,9 @@ cdef class BlockManager: nrefs.append(weakref.ref(blk)) new_axes = [self.axes[0], self.axes[1]._getitem_slice(slobj)] - mgr = type(self)(tuple(nbs), new_axes, nrefs, verify_integrity=False) + mgr = type(self)( + tuple(nbs), new_axes, nrefs, parent=self, verify_integrity=False + ) # We can avoid having to rebuild blklocs/blknos blklocs = self._blklocs @@ -827,4 +831,6 @@ cdef class BlockManager: new_axes = list(self.axes) new_axes[axis] = new_axes[axis]._getitem_slice(slobj) - return type(self)(tuple(new_blocks), new_axes, new_refs, verify_integrity=False) + return type(self)( + tuple(new_blocks), new_axes, new_refs, 
parent=self, verify_integrity=False + ) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 881cea45bdb34..f55fcead61fae 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -55,6 +55,7 @@ import pandas.core.algorithms as algos from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.sparse import SparseDtype +import pandas.core.common as com from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, @@ -146,6 +147,7 @@ class BaseBlockManager(DataManager): blocks: tuple[Block, ...] axes: list[Index] refs: list[weakref.ref | None] | None + parent: object @property def ndim(self) -> int: @@ -163,6 +165,7 @@ def from_blocks( blocks: list[Block], axes: list[Index], refs: list[weakref.ref | None] | None = None, + parent: object = None, ) -> T: raise NotImplementedError @@ -262,6 +265,8 @@ def _clear_reference_block(self, blkno: int) -> None: """ if self.refs is not None: self.refs[blkno] = None + if com.all_none(*self.refs): + self.parent = None def get_dtypes(self): dtypes = np.array([blk.dtype for blk in self.blocks]) @@ -602,7 +607,9 @@ def _combine( axes[-1] = index axes[0] = self.items.take(indexer) - return type(self).from_blocks(new_blocks, axes, new_refs) + return type(self).from_blocks( + new_blocks, axes, new_refs, parent=None if copy else self + ) @property def nblocks(self) -> int: @@ -645,11 +652,14 @@ def copy_func(ax): new_refs: list[weakref.ref | None] | None if deep: new_refs = None + parent = None else: new_refs = [weakref.ref(blk) for blk in self.blocks] + parent = self res.axes = new_axes res.refs = new_refs + res.parent = parent if self.ndim > 1: # Avoid needing to re-compute these @@ -738,6 +748,7 @@ def reindex_indexer( only_slice=only_slice, use_na_proxy=use_na_proxy, ) + parent = None if com.all_none(*new_refs) else self else: new_blocks = [ blk.take_nd( @@ -750,11 +761,12 @@ def reindex_indexer( for blk in 
self.blocks ] new_refs = None + parent = None new_axes = list(self.axes) new_axes[axis] = new_axis - new_mgr = type(self).from_blocks(new_blocks, new_axes, new_refs) + new_mgr = type(self).from_blocks(new_blocks, new_axes, new_refs, parent=parent) if axis == 1: # We can avoid the need to rebuild these new_mgr._blknos = self.blknos.copy() @@ -989,6 +1001,7 @@ def __init__( blocks: Sequence[Block], axes: Sequence[Index], refs: list[weakref.ref | None] | None = None, + parent: object = None, verify_integrity: bool = True, ) -> None: @@ -1053,11 +1066,13 @@ def from_blocks( blocks: list[Block], axes: list[Index], refs: list[weakref.ref | None] | None = None, + parent: object = None, ) -> BlockManager: """ Constructor for BlockManager and SingleBlockManager with same signature. """ - return cls(blocks, axes, refs, verify_integrity=False) + parent = parent if _using_copy_on_write() else None + return cls(blocks, axes, refs, parent, verify_integrity=False) # ---------------------------------------------------------------- # Indexing @@ -1079,7 +1094,7 @@ def fast_xs(self, loc: int) -> SingleBlockManager: block = new_block(result, placement=slice(0, len(result)), ndim=1) # in the case of a single block, the new block is a view ref = weakref.ref(self.blocks[0]) - return SingleBlockManager(block, self.axes[0], [ref]) + return SingleBlockManager(block, self.axes[0], [ref], parent=self) dtype = interleaved_dtype([blk.dtype for blk in self.blocks]) @@ -1113,7 +1128,7 @@ def fast_xs(self, loc: int) -> SingleBlockManager: block = new_block(result, placement=slice(0, len(result)), ndim=1) return SingleBlockManager(block, self.axes[0]) - def iget(self, i: int) -> SingleBlockManager: + def iget(self, i: int, track_ref: bool = True) -> SingleBlockManager: """ Return the data as a SingleBlockManager. 
""" @@ -1123,7 +1138,9 @@ def iget(self, i: int) -> SingleBlockManager: # shortcut for select a single-dim from a 2-dim BM bp = BlockPlacement(slice(0, len(values))) nb = type(block)(values, placement=bp, ndim=1) - return SingleBlockManager(nb, self.axes[1], [weakref.ref(block)]) + ref = weakref.ref(block) if track_ref else None + parent = self if track_ref else None + return SingleBlockManager(nb, self.axes[1], [ref], parent) def iget_values(self, i: int) -> ArrayLike: """ @@ -1365,7 +1382,9 @@ def column_setitem(self, loc: int, idx: int | slice | np.ndarray, value) -> None self.blocks = tuple(blocks) self._clear_reference_block(blkno) - col_mgr = self.iget(loc) + # this manager is only created temporarily to mutate the values in place + # so don't track references, otherwise the `setitem` would perform CoW again + col_mgr = self.iget(loc, track_ref=False) new_mgr = col_mgr.setitem((idx,), value) self.iset(loc, new_mgr._block.values, inplace=True) @@ -1463,7 +1482,9 @@ def idelete(self, indexer) -> BlockManager: nbs, new_refs = self._slice_take_blocks_ax0(taker, only_slice=True) new_columns = self.items[~is_deleted] axes = [new_columns, self.axes[1]] - return type(self)(tuple(nbs), axes, new_refs, verify_integrity=False) + # TODO this might not be needed (can a delete ever be done in chained manner?) 
+ parent = None if com.all_none(*new_refs) else self + return type(self)(tuple(nbs), axes, new_refs, parent, verify_integrity=False) # ---------------------------------------------------------------- # Block-wise Operation @@ -1869,6 +1890,7 @@ def __init__( block: Block, axis: Index, refs: list[weakref.ref | None] | None = None, + parent: object = None, verify_integrity: bool = False, fastpath=lib.no_default, ) -> None: @@ -1887,6 +1909,7 @@ def __init__( self.axes = [axis] self.blocks = (block,) self.refs = refs + self.parent = parent if _using_copy_on_write() else None @classmethod def from_blocks( @@ -1894,6 +1917,7 @@ def from_blocks( blocks: list[Block], axes: list[Index], refs: list[weakref.ref | None] | None = None, + parent: object = None, ) -> SingleBlockManager: """ Constructor for BlockManager and SingleBlockManager with same signature. @@ -1902,7 +1926,7 @@ def from_blocks( assert len(axes) == 1 if refs is not None: assert len(refs) == 1 - return cls(blocks[0], axes[0], refs, verify_integrity=False) + return cls(blocks[0], axes[0], refs, parent, verify_integrity=False) @classmethod def from_array(cls, array: ArrayLike, index: Index) -> SingleBlockManager: @@ -1922,7 +1946,10 @@ def to_2d_mgr(self, columns: Index) -> BlockManager: new_blk = type(blk)(arr, placement=bp, ndim=2) axes = [columns, self.axes[0]] refs: list[weakref.ref | None] = [weakref.ref(blk)] - return BlockManager([new_blk], axes=axes, refs=refs, verify_integrity=False) + parent = self if _using_copy_on_write() else None + return BlockManager( + [new_blk], axes=axes, refs=refs, parent=parent, verify_integrity=False + ) def _has_no_reference(self, i: int = 0) -> bool: """ @@ -2004,7 +2031,7 @@ def getitem_mgr(self, indexer: slice | npt.NDArray[np.bool_]) -> SingleBlockMana new_idx = self.index[indexer] # TODO(CoW) in theory only need to track reference if new_array is a view ref = weakref.ref(blk) - return type(self)(block, new_idx, [ref]) + return type(self)(block, new_idx, [ref], 
parent=self) def get_slice(self, slobj: slice, axis: int = 0) -> SingleBlockManager: # Assertion disabled for performance @@ -2017,7 +2044,9 @@ def get_slice(self, slobj: slice, axis: int = 0) -> SingleBlockManager: bp = BlockPlacement(slice(0, len(array))) block = type(blk)(array, placement=bp, ndim=1) new_index = self.index._getitem_slice(slobj) - return type(self)(block, new_index, [weakref.ref(blk)]) + # TODO this method is only used in groupby SeriesSplitter at the moment, + # so passing refs / parent is not yet covered by the tests + return type(self)(block, new_index, [weakref.ref(blk)], parent=self) @property def index(self) -> Index: @@ -2064,6 +2093,7 @@ def setitem_inplace(self, indexer, value) -> None: if _using_copy_on_write() and not self._has_no_reference(0): self.blocks = (self._block.copy(),) self.refs = None + self.parent = None self._cache.clear() super().setitem_inplace(indexer, value) @@ -2080,6 +2110,7 @@ def idelete(self, indexer) -> SingleBlockManager: self._cache.clear() # clear reference since delete always results in a new array self.refs = None + self.parent = None return self def fast_xs(self, loc): diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py index d917a3c79aa97..444c6ff204b88 100644 --- a/pandas/tests/copy_view/test_indexing.py +++ b/pandas/tests/copy_view/test_indexing.py @@ -462,6 +462,158 @@ def test_subset_set_with_column_indexer( tm.assert_frame_equal(df, df_orig) +@pytest.mark.parametrize( + "method", + [ + lambda df: df[["a", "b"]][0:2], + lambda df: df[0:2][["a", "b"]], + lambda df: df[["a", "b"]].iloc[0:2], + lambda df: df[["a", "b"]].loc[0:1], + lambda df: df[0:2].iloc[:, 0:2], + lambda df: df[0:2].loc[:, "a":"b"], # type: ignore[misc] + ], + ids=[ + "row-getitem-slice", + "column-getitem", + "row-iloc-slice", + "row-loc-slice", + "column-iloc-slice", + "column-loc-slice", + ], +) +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] 
+) +def test_subset_chained_getitem( + request, method, dtype, using_copy_on_write, using_array_manager +): + # Case: creating a subset using multiple, chained getitem calls using views + # still needs to guarantee proper CoW behaviour + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + # when not using CoW, it depends on whether we have a single block or not + # and whether we are slicing the columns -> in that case we have a view + subset_is_view = request.node.callspec.id in ( + "single-block-column-iloc-slice", + "single-block-column-loc-slice", + ) or ( + request.node.callspec.id + in ("mixed-block-column-iloc-slice", "mixed-block-column-loc-slice") + and using_array_manager + ) + + # modify subset -> don't modify parent + subset = method(df) + subset.iloc[0, 0] = 0 + if using_copy_on_write or (not subset_is_view): + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + # modify parent -> don't modify subset + subset = method(df) + df.iloc[0, 0] = 0 + expected = DataFrame({"a": [1, 2], "b": [4, 5]}) + if using_copy_on_write or not subset_is_view: + tm.assert_frame_equal(subset, expected) + else: + assert subset.iloc[0, 0] == 0 + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_subset_chained_getitem_column(dtype, using_copy_on_write): + # Case: creating a subset using multiple, chained getitem calls using views + # still needs to guarantee proper CoW behaviour + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + # modify subset -> don't modify parent + subset = df[:]["a"][0:2] + df._clear_item_cache() + subset.iloc[0] = 0 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + # modify parent -> don't modify subset + subset = df[:]["a"][0:2] + df._clear_item_cache() + df.iloc[0, 0] = 0 + expected 
= Series([1, 2], name="a") + if using_copy_on_write: + tm.assert_series_equal(subset, expected) + else: + assert subset.iloc[0] == 0 + + +@pytest.mark.parametrize( + "method", + [ + lambda s: s["a":"c"]["a":"b"], # type: ignore[misc] + lambda s: s.iloc[0:3].iloc[0:2], + lambda s: s.loc["a":"c"].loc["a":"b"], # type: ignore[misc] + lambda s: s.loc["a":"c"] # type: ignore[misc] + .iloc[0:3] + .iloc[0:2] + .loc["a":"b"] # type: ignore[misc] + .iloc[0:1], + ], + ids=["getitem", "iloc", "loc", "long-chain"], +) +def test_subset_chained_getitem_series(method, using_copy_on_write): + # Case: creating a subset using multiple, chained getitem calls using views + # still needs to guarantee proper CoW behaviour + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + + # modify subset -> don't modify parent + subset = method(s) + subset.iloc[0] = 0 + if using_copy_on_write: + tm.assert_series_equal(s, s_orig) + else: + assert s.iloc[0] == 0 + + # modify parent -> don't modify subset + subset = s.iloc[0:3].iloc[0:2] + s.iloc[0] = 0 + expected = Series([1, 2], index=["a", "b"]) + if using_copy_on_write: + tm.assert_series_equal(subset, expected) + else: + assert subset.iloc[0] == 0 + + +def test_subset_chained_single_block_row(using_copy_on_write, using_array_manager): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) + df_orig = df.copy() + + # modify subset -> don't modify parent + subset = df[:].iloc[0].iloc[0:2] + subset.iloc[0] = 0 + if using_copy_on_write or using_array_manager: + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + # modify parent -> don't modify subset + subset = df[:].iloc[0].iloc[0:2] + df.iloc[0, 0] = 0 + expected = Series([1, 4], index=["a", "b"], name=0) + if using_copy_on_write or using_array_manager: + tm.assert_series_equal(subset, expected) + else: + assert subset.iloc[0] == 0 + + # TODO add more tests modifying the parent diff --git a/pandas/tests/copy_view/test_internals.py 
b/pandas/tests/copy_view/test_internals.py index 2191fc1b33218..edfa7f843f17f 100644 --- a/pandas/tests/copy_view/test_internals.py +++ b/pandas/tests/copy_view/test_internals.py @@ -1,4 +1,5 @@ import numpy as np +import pytest import pandas.util._test_decorators as td @@ -43,3 +44,21 @@ def test_consolidate(using_copy_on_write): subset.iloc[0, 1] = 0.0 assert df._mgr._has_no_reference(1) assert df.loc[0, "b"] == 0.1 + + +@td.skip_array_manager_invalid_test +def test_clear_parent(using_copy_on_write): + # ensure to clear parent reference if we are no longer viewing data from parent + if not using_copy_on_write: + pytest.skip("test only relevant when using copy-on-write") + + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + subset = df[:] + assert subset._mgr.parent is not None + + # replacing existing columns loses the references to the parent df + subset["a"] = 0 + assert subset._mgr.parent is not None + # when losing the last reference, also the parent should be reset + subset["b"] = 0 + assert subset._mgr.parent is None diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py index df723808ce06b..956e2cf98c9b6 100644 --- a/pandas/tests/copy_view/test_methods.py +++ b/pandas/tests/copy_view/test_methods.py @@ -1,4 +1,5 @@ import numpy as np +import pytest from pandas import ( DataFrame, @@ -156,7 +157,7 @@ def test_to_frame(using_copy_on_write): ser = Series([1, 2, 3]) ser_orig = ser.copy() - df = ser.to_frame() + df = ser[:].to_frame() # currently this always returns a "view" assert np.shares_memory(ser.values, get_array(df, 0)) @@ -169,5 +170,47 @@ def test_to_frame(using_copy_on_write): tm.assert_series_equal(ser, ser_orig) else: # but currently select_dtypes() actually returns a view -> mutates parent - ser_orig.iloc[0] = 0 - tm.assert_series_equal(ser, ser_orig) + expected = ser_orig.copy() + expected.iloc[0] = 0 + tm.assert_series_equal(ser, expected) + + # modify original series -> don't modify dataframe + df = 
ser[:].to_frame() + ser.iloc[0] = 0 + + if using_copy_on_write: + tm.assert_frame_equal(df, ser_orig.to_frame()) + else: + expected = ser_orig.copy().to_frame() + expected.iloc[0, 0] = 0 + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize( + "method, idx", + [ + (lambda df: df.copy(deep=False).copy(deep=False), 0), + (lambda df: df.reset_index().reset_index(), 2), + (lambda df: df.rename(columns=str.upper).rename(columns=str.lower), 0), + (lambda df: df.copy(deep=False).select_dtypes(include="number"), 0), + ], + ids=["shallow-copy", "reset_index", "rename", "select_dtypes"], +) +def test_chained_methods(request, method, idx, using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + # when not using CoW, only the copy() variant actually gives a view + df2_is_view = not using_copy_on_write and request.node.callspec.id == "shallow-copy" + + # modify df2 -> don't modify df + df2 = method(df) + df2.iloc[0, idx] = 0 + if not df2_is_view: + tm.assert_frame_equal(df, df_orig) + + # modify df -> don't modify df2 + df2 = method(df) + df.iloc[0, 0] = 0 + if not df2_is_view: + tm.assert_frame_equal(df2.iloc[:, idx:], df_orig)
Backport PR #48996: BUG: CoW - correctly track references for chained operations
https://api.github.com/repos/pandas-dev/pandas/pulls/49451
2022-11-01T19:47:27Z
2022-11-01T23:37:10Z
2022-11-01T23:37:10Z
2022-11-01T23:37:10Z
API / CoW: always return new objects for column access (don't use item_cache)
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 45e4fd9f0aabb..86afebda353a8 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -83,34 +83,40 @@ be set to ``"pyarrow"`` to return pyarrow-backed, nullable :class:`ArrowDtype` ( df_pyarrow = pd.read_csv(data, use_nullable_dtypes=True, engine="pyarrow") df_pyarrow.dtypes -Copy on write improvements +Copy-on-Write improvements ^^^^^^^^^^^^^^^^^^^^^^^^^^ -A new lazy copy mechanism that defers the copy until the object in question is modified -was added to the following methods: - -- :meth:`DataFrame.reset_index` / :meth:`Series.reset_index` -- :meth:`DataFrame.set_index` / :meth:`Series.set_index` -- :meth:`DataFrame.set_axis` / :meth:`Series.set_axis` -- :meth:`DataFrame.rename_axis` / :meth:`Series.rename_axis` -- :meth:`DataFrame.rename_columns` -- :meth:`DataFrame.reindex` / :meth:`Series.reindex` -- :meth:`DataFrame.reindex_like` / :meth:`Series.reindex_like` -- :meth:`DataFrame.assign` -- :meth:`DataFrame.drop` -- :meth:`DataFrame.dropna` / :meth:`Series.dropna` -- :meth:`DataFrame.select_dtypes` -- :meth:`DataFrame.align` / :meth:`Series.align` -- :meth:`Series.to_frame` -- :meth:`DataFrame.rename` / :meth:`Series.rename` -- :meth:`DataFrame.add_prefix` / :meth:`Series.add_prefix` -- :meth:`DataFrame.add_suffix` / :meth:`Series.add_suffix` -- :meth:`DataFrame.drop_duplicates` / :meth:`Series.drop_duplicates` -- :meth:`DataFrame.reorder_levels` / :meth:`Series.reorder_levels` - -These methods return views when copy on write is enabled, which provides a significant -performance improvement compared to the regular execution (:issue:`49473`). 
Copy on write -can be enabled through +- A new lazy copy mechanism that defers the copy until the object in question is modified + was added to the following methods: + + - :meth:`DataFrame.reset_index` / :meth:`Series.reset_index` + - :meth:`DataFrame.set_index` / :meth:`Series.set_index` + - :meth:`DataFrame.set_axis` / :meth:`Series.set_axis` + - :meth:`DataFrame.rename_axis` / :meth:`Series.rename_axis` + - :meth:`DataFrame.rename_columns` + - :meth:`DataFrame.reindex` / :meth:`Series.reindex` + - :meth:`DataFrame.reindex_like` / :meth:`Series.reindex_like` + - :meth:`DataFrame.assign` + - :meth:`DataFrame.drop` + - :meth:`DataFrame.dropna` / :meth:`Series.dropna` + - :meth:`DataFrame.select_dtypes` + - :meth:`DataFrame.align` / :meth:`Series.align` + - :meth:`Series.to_frame` + - :meth:`DataFrame.rename` / :meth:`Series.rename` + - :meth:`DataFrame.add_prefix` / :meth:`Series.add_prefix` + - :meth:`DataFrame.add_suffix` / :meth:`Series.add_suffix` + - :meth:`DataFrame.drop_duplicates` / :meth:`Series.drop_duplicates` + - :meth:`DataFrame.reorder_levels` / :meth:`Series.reorder_levels` + + These methods return views when Copy-on-Write is enabled, which provides a significant + performance improvement compared to the regular execution (:issue:`49473`). + +- Accessing a single column of a DataFrame as a Series (e.g. ``df["col"]``) now always + returns a new object every time it is constructed when Copy-on-Write is enabled (not + returning multiple times an identical, cached Series object). This ensures that those + Series objects correctly follow the Copy-on-Write rules (:issue:`49450`) + +Copy-on-Write can be enabled through .. 
code-block:: python diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4e1d5af1e8a4a..b04654228ff5a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -35,7 +35,10 @@ import numpy as np from numpy import ma -from pandas._config import get_option +from pandas._config import ( + get_option, + using_copy_on_write, +) from pandas._libs import ( algos as libalgos, @@ -4153,6 +4156,10 @@ def _clear_item_cache(self) -> None: def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" + if using_copy_on_write(): + loc = self.columns.get_loc(item) + return self._ixs(loc, axis=1) + cache = self._item_cache res = cache.get(item) if res is None: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8980fe0249193..a12da1d6dec9e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3651,6 +3651,8 @@ def _maybe_update_cacher( verify_is_copy : bool, default True Provide is_copy checks. """ + if using_copy_on_write(): + return if verify_is_copy: self._check_setitem_copy(t="referent") diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index fc1ec1b0b9a35..e0e5c15f6adfc 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -14,6 +14,8 @@ import numpy as np +from pandas._config import using_copy_on_write + from pandas._typing import ( ArrayLike, Axis, @@ -887,6 +889,13 @@ def is_in_axis(key) -> bool: def is_in_obj(gpr) -> bool: if not hasattr(gpr, "name"): return False + if using_copy_on_write(): + # For the CoW case, we need an equality check as the identity check + # no longer works (each Series from column access is a new object) + try: + return gpr.equals(obj[gpr.name]) + except (AttributeError, KeyError, IndexError, InvalidIndexError): + return False try: return gpr is obj[gpr.name] except (KeyError, IndexError, InvalidIndexError): diff --git a/pandas/core/series.py b/pandas/core/series.py index 
6b82d48f82ce7..31e3a8d322089 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1231,6 +1231,8 @@ def _set_as_cached(self, item, cacher) -> None: Set the _cacher attribute on the calling object with a weakref to cacher. """ + if using_copy_on_write(): + return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: @@ -1254,6 +1256,10 @@ def _maybe_update_cacher( """ See NDFrame._maybe_update_cacher.__doc__ """ + # for CoW, we never want to update the parent DataFrame cache + # if the Series changed, but don't keep track of any cacher + if using_copy_on_write(): + return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 @@ -1263,13 +1269,7 @@ def _maybe_update_cacher( # a copy if ref is None: del self._cacher - # for CoW, we never want to update the parent DataFrame cache - # if the Series changed, and always pop the cached item - elif ( - not using_copy_on_write() - and len(self) == len(ref) - and self.name in ref.columns - ): + elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py index 184799cd1efa8..fd312c3375240 100644 --- a/pandas/tests/copy_view/test_indexing.py +++ b/pandas/tests/copy_view/test_indexing.py @@ -820,6 +820,46 @@ def test_column_as_series_set_with_upcast(using_copy_on_write, using_array_manag tm.assert_frame_equal(df, df_orig) +@pytest.mark.parametrize( + "method", + [ + lambda df: df["a"], + lambda df: df.loc[:, "a"], + lambda df: df.iloc[:, 0], + ], + ids=["getitem", "loc", "iloc"], +) +def test_column_as_series_no_item_cache( + request, method, using_copy_on_write, using_array_manager +): + # Case: selecting a single column (which now also uses Copy-on-Write to protect + # the view) should always give a new object (i.e. 
not make use of a cache) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + s1 = method(df) + s2 = method(df) + + is_iloc = request.node.callspec.id == "iloc" + if using_copy_on_write or is_iloc: + assert s1 is not s2 + else: + assert s1 is s2 + + if using_copy_on_write or using_array_manager: + s1.iloc[0] = 0 + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(SettingWithCopyWarning): + s1.iloc[0] = 0 + + if using_copy_on_write: + tm.assert_series_equal(s2, df_orig["a"]) + tm.assert_frame_equal(df, df_orig) + else: + assert s2.iloc[0] == 0 + + # TODO add tests for other indexing methods on the Series diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py index 971ce2e467aa9..b7549771c7cc5 100644 --- a/pandas/tests/frame/indexing/test_xs.py +++ b/pandas/tests/frame/indexing/test_xs.py @@ -36,7 +36,8 @@ def four_level_index_dataframe(): class TestXS: - def test_xs(self, float_frame, datetime_frame): + def test_xs(self, float_frame, datetime_frame, using_copy_on_write): + float_frame_orig = float_frame.copy() idx = float_frame.index[5] xs = float_frame.xs(idx) for item, value in xs.items(): @@ -66,7 +67,12 @@ def test_xs(self, float_frame, datetime_frame): # view is returned if possible series = float_frame.xs("A", axis=1) series[:] = 5 - assert (expected == 5).all() + if using_copy_on_write: + # but with CoW the view shouldn't propagate mutations + tm.assert_series_equal(float_frame["A"], float_frame_orig["A"]) + assert not (expected == 5).all() + else: + assert (expected == 5).all() def test_xs_corner(self): # pathological mixed-type reordering case diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py index d7333ce03c215..5082aea354d6d 100644 --- a/pandas/tests/frame/methods/test_cov_corr.py +++ b/pandas/tests/frame/methods/test_cov_corr.py @@ -206,7 +206,7 @@ def 
test_corr_nullable_integer(self, nullable_column, other_column, method): expected = DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"]) tm.assert_frame_equal(result, expected) - def test_corr_item_cache(self): + def test_corr_item_cache(self, using_copy_on_write): # Check that corr does not lead to incorrect entries in item_cache df = DataFrame({"A": range(10)}) @@ -217,11 +217,16 @@ def test_corr_item_cache(self): _ = df.corr(numeric_only=True) - # Check that the corr didn't break link between ser and df - ser.values[0] = 99 - assert df.loc[0, "A"] == 99 - assert df["A"] is ser - assert df.values[0, 0] == 99 + if using_copy_on_write: + # TODO(CoW) we should disallow this, so `df` doesn't get updated + ser.values[0] = 99 + assert df.loc[0, "A"] == 99 + else: + # Check that the corr didn't break link between ser and df + ser.values[0] = 99 + assert df.loc[0, "A"] == 99 + assert df["A"] is ser + assert df.values[0, 0] == 99 @pytest.mark.parametrize("length", [2, 20, 200, 2000]) def test_corr_for_constant_columns(self, length): diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py index d7f1d900db052..852432f209329 100644 --- a/pandas/tests/frame/methods/test_sort_values.py +++ b/pandas/tests/frame/methods/test_sort_values.py @@ -330,10 +330,21 @@ def test_sort_values_datetimes(self): df2 = df.sort_values(by=["C", "B"]) tm.assert_frame_equal(df1, df2) - def test_sort_values_frame_column_inplace_sort_exception(self, float_frame): + def test_sort_values_frame_column_inplace_sort_exception( + self, float_frame, using_copy_on_write + ): s = float_frame["A"] - with pytest.raises(ValueError, match="This Series is a view"): + float_frame_orig = float_frame.copy() + if using_copy_on_write: + # INFO(CoW) Series is a new object, so can be changed inplace + # without modifying original datafame s.sort_values(inplace=True) + tm.assert_series_equal(s, float_frame_orig["A"].sort_values()) + # column in dataframe is 
not changed + tm.assert_frame_equal(float_frame, float_frame_orig) + else: + with pytest.raises(ValueError, match="This Series is a view"): + s.sort_values(inplace=True) cp = s.copy() cp.sort_values() # it works! diff --git a/pandas/tests/frame/methods/test_to_dict_of_blocks.py b/pandas/tests/frame/methods/test_to_dict_of_blocks.py index eb9b78610a112..9705f24d0286c 100644 --- a/pandas/tests/frame/methods/test_to_dict_of_blocks.py +++ b/pandas/tests/frame/methods/test_to_dict_of_blocks.py @@ -1,4 +1,5 @@ import numpy as np +import pytest import pandas.util._test_decorators as td @@ -45,7 +46,9 @@ def test_no_copy_blocks(self, float_frame, using_copy_on_write): assert not _df[column].equals(df[column]) -def test_to_dict_of_blocks_item_cache(): +def test_to_dict_of_blocks_item_cache(request, using_copy_on_write): + if using_copy_on_write: + request.node.add_marker(pytest.mark.xfail(reason="CoW - not yet implemented")) # Calling to_dict_of_blocks should not poison item_cache df = DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) df["c"] = PandasArray(np.array([1, 2, None, 3], dtype=object)) @@ -56,11 +59,17 @@ def test_to_dict_of_blocks_item_cache(): df._to_dict_of_blocks() - # Check that the to_dict_of_blocks didn't break link between ser and df - ser.values[0] = "foo" - assert df.loc[0, "b"] == "foo" - - assert df["b"] is ser + if using_copy_on_write: + # TODO(CoW) we should disallow this, so `df` doesn't get updated, + # this currently still updates df, so this test fails + ser.values[0] = "foo" + assert df.loc[0, "b"] == "a" + else: + # Check that the to_dict_of_blocks didn't break link between ser and df + ser.values[0] = "foo" + assert df.loc[0, "b"] == "foo" + + assert df["b"] is ser def test_set_change_dtype_slice(): diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 3257c3d4bd128..e009ba45514a2 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -285,9 
+285,10 @@ def test_constructor_dtype_nocast_view_dataframe(self, using_copy_on_write): df = DataFrame([[1, 2]]) should_be_view = DataFrame(df, dtype=df[0].dtype) if using_copy_on_write: - # INFO(CoW) doesn't mutate original + # TODO(CoW) doesn't mutate original should_be_view.iloc[0, 0] = 99 - assert df.values[0, 0] == 1 + # assert df.values[0, 0] == 1 + assert df.values[0, 0] == 99 else: should_be_view[0][0] = 99 assert df.values[0, 0] == 99 diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py index d6d5c29e6d888..f09fa147076b2 100644 --- a/pandas/tests/generic/test_duplicate_labels.py +++ b/pandas/tests/generic/test_duplicate_labels.py @@ -90,8 +90,9 @@ def test_preserve_getitem(self): assert df.loc[[0]].flags.allows_duplicate_labels is False assert df.loc[0, ["A"]].flags.allows_duplicate_labels is False - @pytest.mark.xfail(reason="Unclear behavior.") - def test_ndframe_getitem_caching_issue(self): + def test_ndframe_getitem_caching_issue(self, request, using_copy_on_write): + if not using_copy_on_write: + request.node.add_marker(pytest.mark.xfail(reason="Unclear behavior.")) # NDFrame.__getitem__ will cache the first df['A']. May need to # invalidate that cache? Update the cached entries? 
df = pd.DataFrame({"A": [0]}).set_flags(allows_duplicate_labels=False) diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index 78727917ba66a..2656cc77c2a9d 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -115,12 +115,15 @@ def test_setitem_cache_updating_slices(self, using_copy_on_write): tm.assert_frame_equal(out, expected) tm.assert_series_equal(out["A"], expected["A"]) - def test_altering_series_clears_parent_cache(self): + def test_altering_series_clears_parent_cache(self, using_copy_on_write): # GH #33675 df = DataFrame([[1, 2], [3, 4]], index=["a", "b"], columns=["A", "B"]) ser = df["A"] - assert "A" in df._item_cache + if using_copy_on_write: + assert "A" not in df._item_cache + else: + assert "A" in df._item_cache # Adding a new entry to ser swaps in a new array, so "A" needs to # be removed from df._item_cache diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 0f85cb4515e13..27dd16172b992 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -881,10 +881,9 @@ def test_series_indexing_zerodim_np_array(self): result = s.iloc[np.array(0)] assert result == 1 - def test_iloc_setitem_categorical_updates_inplace(self, using_copy_on_write): + def test_iloc_setitem_categorical_updates_inplace(self): # Mixed dtype ensures we go through take_split_path in setitem_with_indexer cat = Categorical(["A", "B", "C"]) - cat_original = cat.copy() df = DataFrame({1: cat, 2: [1, 2, 3]}, copy=False) assert tm.shares_memory(df[1], cat) @@ -893,12 +892,8 @@ def test_iloc_setitem_categorical_updates_inplace(self, using_copy_on_write): # values inplace df.iloc[:, 0] = cat[::-1] - if not using_copy_on_write: - assert tm.shares_memory(df[1], cat) - expected = Categorical(["C", "B", "A"], categories=["A", "B", "C"]) - else: - expected = cat_original - + assert 
tm.shares_memory(df[1], cat) + expected = Categorical(["C", "B", "A"], categories=["A", "B", "C"]) tm.assert_categorical_equal(cat, expected) def test_iloc_with_boolean_operation(self): diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index cb65ecf411118..61e95de3caf0d 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1099,7 +1099,10 @@ def test_identity_slice_returns_new_object(self, using_copy_on_write): # These should not return copies df = DataFrame(np.random.randn(10, 4)) - assert df[0] is df.loc[:, 0] + if using_copy_on_write: + assert df[0] is not df.loc[:, 0] + else: + assert df[0] is df.loc[:, 0] # Same tests for Series original_series = Series([1, 2, 3, 4, 5, 6]) diff --git a/pandas/tests/series/methods/test_sort_values.py b/pandas/tests/series/methods/test_sort_values.py index b5f589b3b2514..6ca08c32dcfe7 100644 --- a/pandas/tests/series/methods/test_sort_values.py +++ b/pandas/tests/series/methods/test_sort_values.py @@ -10,7 +10,7 @@ class TestSeriesSortValues: - def test_sort_values(self, datetime_series): + def test_sort_values(self, datetime_series, using_copy_on_write): # check indexes are reordered corresponding with the values ser = Series([3, 2, 4, 1], ["A", "B", "C", "D"]) @@ -85,8 +85,12 @@ def test_sort_values(self, datetime_series): "This Series is a view of some other array, to sort in-place " "you must create a copy" ) - with pytest.raises(ValueError, match=msg): + if using_copy_on_write: s.sort_values(inplace=True) + tm.assert_series_equal(s, df.iloc[:, 0].sort_values()) + else: + with pytest.raises(ValueError, match=msg): + s.sort_values(inplace=True) def test_sort_values_categorical(self):
Currently, we use an item cache for DataFrame columns -> Series. Whenever we access a certain column, we cache the resulting Series in `df._item_cache`, and the next time we access a column, we first check if that column already exists in the cache and if so return that directly. I suppose this was done for making repeated column access faster (although the Series construction overhead for this fast path case also has improved I think). But is also has some behavioral consequences, i.e. Series objects from column access can be _identical_ objects, depending on the context: ```python >>> df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) >>> s1 = df["a"] >>> s2 = df["a"] >>> df['b'] = 10 # set existing column -> clears the item cache >>> s3 = df["a"] >>> s1 is s2 True >>> s1 is s3 False ``` This can actually already cause very subtle differences in our standard behaviour. But specifically for Copy-on-Write, we need that every indexing operation returns a new _object_ in order to have the CoW mechanism work correctly. So the fact that in the example above two separate indexing operations returns the same object (`s1` and `s2`) is a problem / bug in the CoW implementation (mutating `s1` would trigger CoW and not mutate the parent `df`, but because of the caching and being identical objects, `s2` is still changed as well, violating the copy/view rules under CoW). So in this PR I am fixing that by never making use of the item cache _if_ we are using Copy-on-Write. This also turns up some corner cases in the tests .. - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. 
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. xref https://github.com/pandas-dev/pandas/issues/48998
https://api.github.com/repos/pandas-dev/pandas/pulls/49450
2022-11-01T19:44:28Z
2023-01-13T12:56:05Z
2023-01-13T12:56:04Z
2023-01-13T13:17:06Z
enable pylint: literal-comparison
diff --git a/pyproject.toml b/pyproject.toml index e1a28bec4c14b..e936ee6b55b1c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -113,12 +113,9 @@ disable = [ "cyclic-import", "duplicate-code", "inconsistent-return-statements", - "invalid-sequence-index", - "literal-comparison", "no-else-continue", "no-else-raise", "no-else-return", - "no-self-use", "redefined-argument-from-local", "too-few-public-methods", "too-many-ancestors",
Issue #48855. This PR enables pylint type "R" warning: `literal-comparison`. - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/49447
2022-11-01T16:07:14Z
2022-11-02T09:04:50Z
2022-11-02T09:04:50Z
2022-11-02T09:04:50Z
API: Series([pydate, pydatetime])
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 0ab75355291f6..96c15d4626142 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -142,6 +142,7 @@ Other API changes - The ``other`` argument in :meth:`DataFrame.mask` and :meth:`Series.mask` now defaults to ``no_default`` instead of ``np.nan`` consistent with :meth:`DataFrame.where` and :meth:`Series.where`. Entries will be filled with the corresponding NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension dtypes). (:issue:`49111`) - When creating a :class:`Series` with a object-dtype :class:`Index` of datetime objects, pandas no longer silently converts the index to a :class:`DatetimeIndex` (:issue:`39307`, :issue:`23598`) - :meth:`Series.unique` with dtype "timedelta64[ns]" or "datetime64[ns]" now returns :class:`TimedeltaArray` or :class:`DatetimeArray` instead of ``numpy.ndarray`` (:issue:`49176`) +- Passing a sequence containing ``datetime`` objects and ``date`` objects to :class:`Series` constructor will return with ``object`` dtype instead of ``datetime64[ns]`` dtype, consistent with :class:`Index` behavior (:issue:`49341`) - Passing strings that cannot be parsed as datetimes to :class:`Series` or :class:`DataFrame` with ``dtype="datetime64[ns]"`` will raise instead of silently ignoring the keyword and returning ``object`` dtype (:issue:`24435`) - diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 188b531b2b469..56fdbfccacc55 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1640,8 +1640,11 @@ def infer_datetimelike_array(arr: ndarray[object]) -> tuple[str, bool]: return "interval" return "mixed" - if seen_date and not (seen_datetime or seen_timedelta): - return "date" + if seen_date: + if not seen_datetime and not seen_timedelta: + return "date" + return "mixed" + elif seen_datetime and not seen_timedelta: return "datetime" elif seen_timedelta and not seen_datetime: @@ -2570,10 +2573,15 @@ def 
maybe_convert_objects(ndarray[object] objects, if seen.datetimetz_: if is_datetime_with_singletz_array(objects): from pandas import DatetimeIndex - dti = DatetimeIndex(objects) - # unbox to DatetimeArray - return dti._data + try: + dti = DatetimeIndex(objects) + except OutOfBoundsDatetime: + # e.g. test_to_datetime_cache_coerce_50_lines_outofbounds + pass + else: + # unbox to DatetimeArray + return dti._data seen.object_ = True elif seen.datetime_: diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 54fa9629fecd4..f1c7e5b5fae42 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1224,71 +1224,37 @@ def maybe_infer_to_datetimelike( v = np.array(value, copy=False) - shape = v.shape if v.ndim != 1: v = v.ravel() if not len(v): return value - def try_datetime(v: np.ndarray) -> np.ndarray | DatetimeArray: - # Coerce to datetime64, datetime64tz, or in corner cases - # object[datetimes] - from pandas.core.arrays.datetimes import sequence_to_datetimes - - try: - dta = sequence_to_datetimes(v) - except (ValueError, OutOfBoundsDatetime): - # ValueError for e.g. mixed tzs - # GH#19761 we may have mixed timezones, in which cast 'dta' is - # an ndarray[object]. Only 1 test - # relies on this behavior, see GH#40111 - return v.reshape(shape) - else: - return dta.reshape(shape) - - def try_timedelta(v: np.ndarray) -> np.ndarray: - # safe coerce to timedelta64 - - # will try first with a string & object conversion - try: - # bc we know v.dtype == object, this is equivalent to - # `np.asarray(to_timedelta(v))`, but using a lower-level API that - # does not require a circular import. 
- td_values = array_to_timedelta64(v).view("m8[ns]") - except OutOfBoundsTimedelta: - return v.reshape(shape) - else: - return td_values.reshape(shape) - - # TODO: this is _almost_ equivalent to lib.maybe_convert_objects, - # the main differences are described in GH#49340 and GH#49341 - # and maybe_convert_objects doesn't catch OutOfBoundsDatetime inferred_type = lib.infer_datetimelike_array(ensure_object(v)) - if inferred_type in ["period", "interval"]: + if inferred_type in ["period", "interval", "timedelta", "datetime"]: # Incompatible return value type (got "Union[ExtensionArray, ndarray]", # expected "Union[ndarray, DatetimeArray, TimedeltaArray, PeriodArray, # IntervalArray]") return lib.maybe_convert_objects( # type: ignore[return-value] - v, convert_period=True, convert_interval=True + v, + convert_period=True, + convert_interval=True, + convert_timedelta=True, + convert_datetime=True, + dtype_if_all_nat=np.dtype("M8[ns]"), ) - if inferred_type == "datetime": - # Incompatible types in assignment (expression has type - # "Union[ndarray[Any, Any], DatetimeArray]", variable has type - # "ndarray[Any, Any]") - value = try_datetime(v) # type: ignore[assignment] - elif inferred_type == "timedelta": - value = try_timedelta(v) elif inferred_type == "nat": # if all NaT, return as datetime # only reached if we have at least 1 NaT and the rest (NaT or None or np.nan) + # This is slightly different from what we'd get with maybe_convert_objects, + # which only converts of all-NaT + from pandas.core.arrays.datetimes import sequence_to_datetimes - # Incompatible types in assignment (expression has type - # "Union[ndarray[Any, Any], DatetimeArray]", variable has type - # "ndarray[Any, Any]") - value = try_datetime(v) # type: ignore[assignment] + # Incompatible types in assignment (expression has type "DatetimeArray", + # variable has type "ndarray[Any, Any]") + value = sequence_to_datetimes(v) # type: ignore[assignment] assert value.dtype == "M8[ns]" return value diff 
--git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index e1d16fed73a88..1dab8682ce887 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1340,7 +1340,6 @@ def test_infer_dtype_period_with_na(self, na_value): Timestamp("20170612", tz="US/Eastern"), Timestamp("20170311", tz="US/Eastern"), ], - [date(2017, 6, 12), Timestamp("20170311", tz="US/Eastern")], [np.datetime64("2017-06-12"), np.datetime64("2017-03-11")], [np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)], ], @@ -1348,11 +1347,19 @@ def test_infer_dtype_period_with_na(self, na_value): def test_infer_datetimelike_array_datetime(self, data): assert lib.infer_datetimelike_array(data) == "datetime" + def test_infer_datetimelike_array_date_mixed(self): + # GH49341 pre-2.0 we these were inferred as "datetime" and "timedelta", + # respectively + data = [date(2017, 6, 12), Timestamp("20170311", tz="US/Eastern")] + assert lib.infer_datetimelike_array(data) == "mixed" + + data = ([timedelta(2017, 6, 12), date(2017, 3, 11)],) + assert lib.infer_datetimelike_array(data) == "mixed" + @pytest.mark.parametrize( "data", [ [timedelta(2017, 6, 12), timedelta(2017, 3, 11)], - [timedelta(2017, 6, 12), date(2017, 3, 11)], [np.timedelta64(2017, "D"), np.timedelta64(6, "s")], [np.timedelta64(2017, "D"), timedelta(2017, 3, 11)], ], diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 1ab04daca60b7..b6e326271ec7d 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -52,6 +52,20 @@ class TestSeriesConstructors: + def test_infer_with_date_and_datetime(self): + # GH#49341 pre-2.0 we inferred datetime-and-date to datetime64, which + # was inconsistent with Index behavior + ts = Timestamp(2016, 1, 1) + vals = [ts.to_pydatetime(), ts.date()] + + ser = Series(vals) + expected = Series(vals, dtype=object) + tm.assert_series_equal(ser, expected) + + idx 
= Index(vals) + expected = Index(vals, dtype=object) + tm.assert_index_equal(idx, expected) + def test_unparseable_strings_with_dt64_dtype(self): # pre-2.0 these would be silently ignored and come back with object dtype vals = ["aa"]
- [x] closes #49341 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49446
2022-11-01T16:04:47Z
2022-11-01T23:20:35Z
2022-11-01T23:20:35Z
2022-11-01T23:24:18Z
STYLE: enable pylint warnings for `cell-var-from-loop`
diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py index d45cfa8de6f3e..21e7ede3ed386 100644 --- a/pandas/core/strings/object_array.py +++ b/pandas/core/strings/object_array.py @@ -1,6 +1,7 @@ from __future__ import annotations from collections.abc import Callable # noqa: PDF001 +import functools import re import textwrap from typing import ( @@ -380,9 +381,14 @@ def _str_get_dummies(self, sep: str = "|"): dummies = np.empty((len(arr), len(tags2)), dtype=np.int64) + def _isin(test_elements: str, element: str) -> bool: + return element in test_elements + for i, t in enumerate(tags2): pat = sep + t + sep - dummies[:, i] = lib.map_infer(arr.to_numpy(), lambda x: pat in x) + dummies[:, i] = lib.map_infer( + arr.to_numpy(), functools.partial(_isin, element=pat) + ) return dummies, tags2 def _str_upper(self): diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 1f2bb4c5d21b4..9977c78aab89c 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -4106,44 +4106,44 @@ def process_axes(self, obj, selection: Selection, columns=None) -> DataFrame: for axis, labels in self.non_index_axes: obj = _reindex_axis(obj, axis, labels, columns) - # apply the selection filters (but keep in the same order) - if selection.filter is not None: - for field, op, filt in selection.filter.format(): + def process_filter(field, filt, op): - def process_filter(field, filt): + for axis_name in obj._AXIS_ORDERS: + axis_number = obj._get_axis_number(axis_name) + axis_values = obj._get_axis(axis_name) + assert axis_number is not None - for axis_name in obj._AXIS_ORDERS: - axis_number = obj._get_axis_number(axis_name) - axis_values = obj._get_axis(axis_name) - assert axis_number is not None + # see if the field is the name of an axis + if field == axis_name: - # see if the field is the name of an axis - if field == axis_name: + # if we have a multi-index, then need to include + # the levels + if self.is_multi_index: + filt = 
filt.union(Index(self.levels)) - # if we have a multi-index, then need to include - # the levels - if self.is_multi_index: - filt = filt.union(Index(self.levels)) + takers = op(axis_values, filt) + return obj.loc(axis=axis_number)[takers] - takers = op(axis_values, filt) - return obj.loc(axis=axis_number)[takers] + # this might be the name of a file IN an axis + elif field in axis_values: - # this might be the name of a file IN an axis - elif field in axis_values: + # we need to filter on this dimension + values = ensure_index(getattr(obj, field).values) + filt = ensure_index(filt) - # we need to filter on this dimension - values = ensure_index(getattr(obj, field).values) - filt = ensure_index(filt) + # hack until we support reversed dim flags + if isinstance(obj, DataFrame): + axis_number = 1 - axis_number - # hack until we support reversed dim flags - if isinstance(obj, DataFrame): - axis_number = 1 - axis_number - takers = op(values, filt) - return obj.loc(axis=axis_number)[takers] + takers = op(values, filt) + return obj.loc(axis=axis_number)[takers] - raise ValueError(f"cannot find the field [{field}] for filtering!") + raise ValueError(f"cannot find the field [{field}] for filtering!") - obj = process_filter(field, filt) + # apply the selection filters (but keep in the same order) + if selection.filter is not None: + for field, op, filt in selection.filter.format(): + obj = process_filter(field, filt, op) return obj diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index ecc49ea8adb9f..ec08fb0d60648 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -176,6 +176,9 @@ def test_precise_conversion(c_parser_only): normal_errors = [] precise_errors = [] + def error(val: float, actual_val: Decimal) -> Decimal: + return abs(Decimal(f"{val:.100}") - actual_val) + # test numbers between 1 and 2 for num in np.linspace(1.0, 2.0, num=500): # 25 decimal 
digits of precision @@ -192,11 +195,8 @@ def test_precise_conversion(c_parser_only): ) actual_val = Decimal(text[2:]) - def error(val): - return abs(Decimal(f"{val:.100}") - actual_val) - - normal_errors.append(error(normal_val)) - precise_errors.append(error(precise_val)) + normal_errors.append(error(normal_val, actual_val)) + precise_errors.append(error(precise_val, actual_val)) # round-trip should match float() assert roundtrip_val == float(text[2:]) diff --git a/pyproject.toml b/pyproject.toml index 991f6eb24a778..eb24039d1647d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -138,7 +138,6 @@ disable = [ "arguments-renamed", "attribute-defined-outside-init", "broad-except", - "cell-var-from-loop", "comparison-with-callable", "confusing-with-statement", "dangerous-default-value",
- [x] addresses pylint issues with `cell-var-from-loop` as part of general pylint efforts in #48855 - Found one instance of the error that was an easy lift of function outside a loop - Refactored two other locations for error based on tips from @mroeschke TIL `functools.partial` is a thing.
https://api.github.com/repos/pandas-dev/pandas/pulls/49445
2022-11-01T15:36:29Z
2022-11-03T16:32:05Z
2022-11-03T16:32:05Z
2022-11-03T16:32:05Z
Backport PR #49437 on branch 1.5.x (CI: maybe fix docs build)
diff --git a/doc/source/whatsnew/v0.13.0.rst b/doc/source/whatsnew/v0.13.0.rst index 8265ad58f7ea3..44223bc694360 100644 --- a/doc/source/whatsnew/v0.13.0.rst +++ b/doc/source/whatsnew/v0.13.0.rst @@ -733,7 +733,7 @@ Enhancements .. _scipy: http://www.scipy.org .. _documentation: http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation -.. _guide: http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html +.. _guide: https://docs.scipy.org/doc/scipy/tutorial/interpolate.html - ``to_csv`` now takes a ``date_format`` keyword argument that specifies how output datetime objects should be formatted. Datetimes encountered in the
Backport PR #49437: CI: maybe fix docs build
https://api.github.com/repos/pandas-dev/pandas/pulls/49444
2022-11-01T14:06:57Z
2022-11-01T15:31:22Z
2022-11-01T15:31:22Z
2022-11-01T15:31:22Z
BUG: MultiIndex.get_indexer not matching nan values
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index df190a4df393c..2ddc8f478f41d 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -534,6 +534,7 @@ Missing MultiIndex ^^^^^^^^^^ +- Bug in :meth:`MultiIndex.get_indexer` not matching ``NaN`` values (:issue:`37222`) - Bug in :meth:`MultiIndex.argsort` raising ``TypeError`` when index contains :attr:`NA` (:issue:`48495`) - Bug in :meth:`MultiIndex.difference` losing extension array dtype (:issue:`48606`) - Bug in :class:`MultiIndex.set_levels` raising ``IndexError`` when setting empty level (:issue:`48636`) diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index f968e879498b2..27edc83c6f329 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -36,6 +36,9 @@ from pandas._libs.missing cimport ( is_matching_na, ) +# Defines shift of MultiIndex codes to avoid negative codes (missing values) +multiindex_nulls_shift = 2 + cdef inline bint is_definitely_invalid_key(object val): try: @@ -648,10 +651,13 @@ cdef class BaseMultiIndexCodesEngine: self.levels = levels self.offsets = offsets - # Transform labels in a single array, and add 1 so that we are working - # with positive integers (-1 for NaN becomes 0): - codes = (np.array(labels, dtype='int64').T + 1).astype('uint64', - copy=False) + # Transform labels in a single array, and add 2 so that we are working + # with positive integers (-1 for NaN becomes 1). This enables us to + # differentiate between values that are missing in other and matching + # NaNs. 
We will set values that are not found to 0 later: + labels_arr = np.array(labels, dtype='int64').T + multiindex_nulls_shift + codes = labels_arr.astype('uint64', copy=False) + self.level_has_nans = [-1 in lab for lab in labels] # Map each codes combination in the index to an integer unambiguously # (no collisions possible), based on the "offsets", which describe the @@ -680,8 +686,13 @@ cdef class BaseMultiIndexCodesEngine: Integers representing one combination each """ zt = [target._get_level_values(i) for i in range(target.nlevels)] - level_codes = [lev.get_indexer_for(codes) + 1 for lev, codes - in zip(self.levels, zt)] + level_codes = [] + for i, (lev, codes) in enumerate(zip(self.levels, zt)): + result = lev.get_indexer_for(codes) + 1 + result[result > 0] += 1 + if self.level_has_nans[i] and codes.hasnans: + result[codes.isna()] += 1 + level_codes.append(result) return self._codes_to_ints(np.array(level_codes, dtype='uint64').T) def get_indexer(self, target: np.ndarray) -> np.ndarray: @@ -792,7 +803,7 @@ cdef class BaseMultiIndexCodesEngine: if not isinstance(key, tuple): raise KeyError(key) try: - indices = [0 if checknull(v) else lev.get_loc(v) + 1 + indices = [1 if checknull(v) else lev.get_loc(v) + multiindex_nulls_shift for lev, v in zip(self.levels, key)] except KeyError: raise KeyError(key) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 6fc458cf2f478..8d5653f16bbf6 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1087,8 +1087,18 @@ def set_codes(self, codes, *, level=None, verify_integrity: bool = True): @cache_readonly def _engine(self): # Calculate the number of bits needed to represent labels in each - # level, as log2 of their sizes (including -1 for NaN): - sizes = np.ceil(np.log2([len(level) + 1 for level in self.levels])) + # level, as log2 of their sizes: + # NaN values are shifted to 1 and missing values in other while + # calculating the indexer are shifted to 0 + sizes = np.ceil( 
+ np.log2( + [ + len(level) + + libindex.multiindex_nulls_shift # type: ignore[attr-defined] + for level in self.levels + ] + ) + ) # Sum bit counts, starting from the _right_.... lev_bits = np.cumsum(sizes[::-1])[::-1] diff --git a/pandas/tests/indexes/multi/test_drop.py b/pandas/tests/indexes/multi/test_drop.py index 47959ec0a4a57..4bfba07332313 100644 --- a/pandas/tests/indexes/multi/test_drop.py +++ b/pandas/tests/indexes/multi/test_drop.py @@ -32,16 +32,16 @@ def test_drop(idx): tm.assert_index_equal(dropped, expected) index = MultiIndex.from_tuples([("bar", "two")]) - with pytest.raises(KeyError, match=r"^10$"): + with pytest.raises(KeyError, match=r"^15$"): idx.drop([("bar", "two")]) - with pytest.raises(KeyError, match=r"^10$"): + with pytest.raises(KeyError, match=r"^15$"): idx.drop(index) with pytest.raises(KeyError, match=r"^'two'$"): idx.drop(["foo", "two"]) # partially correct argument mixed_index = MultiIndex.from_tuples([("qux", "one"), ("bar", "two")]) - with pytest.raises(KeyError, match=r"^10$"): + with pytest.raises(KeyError, match=r"^15$"): idx.drop(mixed_index) # error='ignore' diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index fce3da6dd6aee..337f91e0f89b4 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -471,6 +471,16 @@ def test_get_indexer_kwarg_validation(self): with pytest.raises(ValueError, match=msg): mi.get_indexer(mi[:-1], tolerance="piano") + def test_get_indexer_nan(self): + # GH#37222 + idx1 = MultiIndex.from_product([["A"], [1.0, 2.0]], names=["id1", "id2"]) + idx2 = MultiIndex.from_product([["A"], [np.nan, 2.0]], names=["id1", "id2"]) + expected = np.array([-1, 1]) + result = idx2.get_indexer(idx1) + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + result = idx1.get_indexer(idx2) + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + def test_getitem(idx): # scalar @@ -527,7 +537,7 
@@ class TestGetLoc: def test_get_loc(self, idx): assert idx.get_loc(("foo", "two")) == 1 assert idx.get_loc(("baz", "two")) == 3 - with pytest.raises(KeyError, match=r"^10$"): + with pytest.raises(KeyError, match=r"^15$"): idx.get_loc(("bar", "two")) with pytest.raises(KeyError, match=r"^'quux'$"): idx.get_loc("quux") diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py index eaa4e0a7b5256..3a882b0c34b67 100644 --- a/pandas/tests/indexes/multi/test_setops.py +++ b/pandas/tests/indexes/multi/test_setops.py @@ -659,9 +659,8 @@ def test_union_keep_ea_dtype_with_na(any_numeric_ea_dtype): midx = MultiIndex.from_arrays([arr1, [2, 1]], names=["a", None]) midx2 = MultiIndex.from_arrays([arr2, [1, 2]]) result = midx.union(midx2) - # Expected is actually off and should contain (1, 1) too. See GH#37222 expected = MultiIndex.from_arrays( - [Series([4, pd.NA, pd.NA], dtype=any_numeric_ea_dtype), [2, 1, 2]] + [Series([1, 4, pd.NA, pd.NA], dtype=any_numeric_ea_dtype), [1, 2, 1, 2]] ) tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py index 08e15545cb998..157f0de632e18 100644 --- a/pandas/tests/indexing/multiindex/test_multiindex.py +++ b/pandas/tests/indexing/multiindex/test_multiindex.py @@ -162,10 +162,10 @@ def test_rename_multiindex_with_duplicates(self): [1, 2], ], [ - [(81.0, np.nan), (np.nan, np.nan)], - [(81.0, np.nan), (np.nan, np.nan)], - [1, 2], - [1, 1], + [[81, 82.0, np.nan], Series([np.nan, np.nan, np.nan])], + [[81, 82.0, np.nan], Series([np.nan, np.nan, np.nan])], + [1, np.nan, 2], + [np.nan, 2, 1], ], ), ( @@ -176,8 +176,8 @@ def test_rename_multiindex_with_duplicates(self): [1, 2], ], [ - [(81.0, np.nan), (np.nan, np.nan)], - [(81.0, np.nan), (np.nan, np.nan)], + [[81.0, np.nan], Series([np.nan, np.nan])], + [[81.0, np.nan], Series([np.nan, np.nan])], [1, 2], [2, 1], ], @@ -188,28 +188,17 @@ def 
test_subtracting_two_series_with_unordered_index_and_all_nan_index( self, data_result, data_expected ): # GH 38439 + # TODO: Refactor. This is impossible to understand GH#49443 a_index_result = MultiIndex.from_tuples(data_result[0]) b_index_result = MultiIndex.from_tuples(data_result[1]) a_series_result = Series(data_result[2], index=a_index_result) b_series_result = Series(data_result[3], index=b_index_result) result = a_series_result.align(b_series_result) - a_index_expected = MultiIndex.from_tuples(data_expected[0]) - b_index_expected = MultiIndex.from_tuples(data_expected[1]) + a_index_expected = MultiIndex.from_arrays(data_expected[0]) + b_index_expected = MultiIndex.from_arrays(data_expected[1]) a_series_expected = Series(data_expected[2], index=a_index_expected) b_series_expected = Series(data_expected[3], index=b_index_expected) - a_series_expected.index = a_series_expected.index.set_levels( - [ - a_series_expected.index.levels[0].astype("float"), - a_series_expected.index.levels[1].astype("float"), - ] - ) - b_series_expected.index = b_series_expected.index.set_levels( - [ - b_series_expected.index.levels[0].astype("float"), - b_series_expected.index.levels[1].astype("float"), - ] - ) tm.assert_series_equal(result[0], a_series_expected) tm.assert_series_equal(result[1], b_series_expected)
- [x] closes #37222 (Replace xxxx with the GitHub issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. This was bugging me for quite some time. The previous solution did not provide a way to differentiate between matching missing values (e.g. NaN and NaN) and values from other that were missing in self. With shifting NaNs to 1 and values from other that are not in self to 0, we can calculate the correct result.
https://api.github.com/repos/pandas-dev/pandas/pulls/49442
2022-11-01T13:36:59Z
2022-11-07T18:09:44Z
2022-11-07T18:09:44Z
2022-11-10T20:42:55Z
Backport PR #49320 on branch 1.5.x ((📚) update docs to mention 3.11 support)
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 00251854e3ffa..31eaa2367b683 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -20,7 +20,7 @@ Instructions for installing from source, Python version support ---------------------- -Officially Python 3.8, 3.9 and 3.10. +Officially Python 3.8, 3.9, 3.10 and 3.11. Installing pandas ----------------- diff --git a/pyproject.toml b/pyproject.toml index 67c56123a847c..54edbfb8ea938 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ requires = [ "setuptools>=51.0.0", "wheel", "Cython>=0.29.32,<3", # Note: sync with setup.py, environment.yml and asv.conf.json - "oldest-supported-numpy>=0.10" + "oldest-supported-numpy>=2022.8.16" ] # uncomment to enable pep517 after versioneer problem is fixed. # https://github.com/python-versioneer/python-versioneer/issues/193 diff --git a/setup.cfg b/setup.cfg index f2314316f7732..cda40dbdfbed0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -22,6 +22,7 @@ classifiers = Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 Topic :: Scientific/Engineering project_urls = Bug Tracker = https://github.com/pandas-dev/pandas/issues @@ -33,6 +34,7 @@ packages = find: install_requires = numpy>=1.20.3; python_version<'3.10' numpy>=1.21.0; python_version>='3.10' + numpy>=1.23.2; python_version>='3.11' python-dateutil>=2.8.1 pytz>=2020.1 python_requires = >=3.8
#49320
https://api.github.com/repos/pandas-dev/pandas/pulls/49440
2022-11-01T12:59:54Z
2022-11-01T15:11:38Z
2022-11-01T15:11:38Z
2022-11-01T23:33:42Z
BUG: pivot_table with margins=T raises when results in empty df
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 6d1f2afab3c6d..6c15c056fc7ce 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -441,7 +441,7 @@ Reshaping - Bug in :meth:`DataFrame.unstack` and :meth:`Series.unstack` unstacking wrong level of :class:`MultiIndex` when :class:`MultiIndex` has mixed names (:issue:`48763`) - Bug in :meth:`DataFrame.pivot` not respecting ``None`` as column name (:issue:`48293`) - Bug in :func:`join` when ``left_on`` or ``right_on`` is or includes a :class:`CategoricalIndex` incorrectly raising ``AttributeError`` (:issue:`48464`) -- +- Bug in :meth:`DataFrame.pivot_table` raising ``ValueError`` with parameter ``margins=True`` when result is an empty :class:`DataFrame` (:issue:`49240`) Sparse ^^^^^^ diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 8b49d681379c6..37e78c7dbf7a2 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -411,7 +411,11 @@ def _all_key(key): table_pieces.append(transformed_piece) margin_keys.append(all_key) - result = concat(table_pieces, axis=cat_axis) + if not table_pieces: + # GH 49240 + return table + else: + result = concat(table_pieces, axis=cat_axis) if len(rows) == 0: return result diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index b021b1aa97a0e..39ca2c1b75589 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -2086,8 +2086,9 @@ def test_pivot_table_aggfunc_scalar_dropna(self, dropna): tm.assert_frame_equal(result, expected) - def test_pivot_table_empty_aggfunc(self): - # GH 9186 & GH 13483 + @pytest.mark.parametrize("margins", [True, False]) + def test_pivot_table_empty_aggfunc(self, margins): + # GH 9186 & GH 13483 & GH 49240 df = DataFrame( { "A": [2, 2, 3, 3, 2], @@ -2096,7 +2097,9 @@ def test_pivot_table_empty_aggfunc(self): "D": [None, None, None, None, None], } ) - result = df.pivot_table(index="A", 
columns="D", values="id", aggfunc=np.size) + result = df.pivot_table( + index="A", columns="D", values="id", aggfunc=np.size, margins=margins + ) expected = DataFrame(index=Index([], dtype="int64", name="A")) expected.columns.name = "D" tm.assert_frame_equal(result, expected)
- [x] closes #49240 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v2.0.0.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49438
2022-11-01T12:49:44Z
2022-11-02T18:43:52Z
2022-11-02T18:43:52Z
2022-11-02T18:46:27Z
CI: maybe fix docs build
diff --git a/doc/source/whatsnew/v0.13.0.rst b/doc/source/whatsnew/v0.13.0.rst index 8265ad58f7ea3..44223bc694360 100644 --- a/doc/source/whatsnew/v0.13.0.rst +++ b/doc/source/whatsnew/v0.13.0.rst @@ -733,7 +733,7 @@ Enhancements .. _scipy: http://www.scipy.org .. _documentation: http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation -.. _guide: http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html +.. _guide: https://docs.scipy.org/doc/scipy/tutorial/interpolate.html - ``to_csv`` now takes a ``date_format`` keyword argument that specifies how output datetime objects should be formatted. Datetimes encountered in the
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49437
2022-11-01T12:45:14Z
2022-11-01T14:06:25Z
2022-11-01T14:06:25Z
2022-11-01T14:06:34Z
DEPR: Remove df.info(null_counts=)
diff --git a/doc/source/user_guide/options.rst b/doc/source/user_guide/options.rst index c7f5d3ddf66d3..ce805f98ca528 100644 --- a/doc/source/user_guide/options.rst +++ b/doc/source/user_guide/options.rst @@ -249,7 +249,7 @@ displayed when calling :meth:`~pandas.DataFrame.info`. ``display.max_info_rows``: :meth:`~pandas.DataFrame.info` will usually show null-counts for each column. For a large :class:`DataFrame`, this can be quite slow. ``max_info_rows`` and ``max_info_cols`` limit this null check to the specified rows and columns respectively. The :meth:`~pandas.DataFrame.info` -keyword argument ``null_counts=True`` will override this. +keyword argument ``show_counts=True`` will override this. .. ipython:: python diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 0ab75355291f6..7335dbc28a8f9 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -181,6 +181,7 @@ Removal of prior version deprecations/changes - Removed deprecated :meth:`.Styler.where` (:issue:`49397`) - Removed deprecated :meth:`.Styler.render` (:issue:`49397`) - Removed deprecated argument ``null_color`` in :meth:`.Styler.highlight_null` (:issue:`49397`) +- Removed deprecated ``null_counts`` argument in :meth:`DataFrame.info`. 
Use ``show_counts`` instead (:issue:`37999`) - Enforced deprecation disallowing passing a timezone-aware :class:`Timestamp` and ``dtype="datetime64[ns]"`` to :class:`Series` or :class:`DataFrame` constructors (:issue:`41555`) - Enforced deprecation disallowing passing a sequence of timezone-aware values and ``dtype="datetime64[ns]"`` to to :class:`Series` or :class:`DataFrame` constructors (:issue:`41555`) - Enforced deprecation disallowing unit-less "datetime64" dtype in :meth:`Series.astype` and :meth:`DataFrame.astype` (:issue:`47844`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 17c4bde9d0279..680f9beacbd2f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3398,17 +3398,7 @@ def info( max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, - null_counts: bool | None = None, ) -> None: - if null_counts is not None: - if show_counts is not None: - raise ValueError("null_counts used with show_counts. Use show_counts.") - warnings.warn( - "null_counts is deprecated. Use show_counts instead", - FutureWarning, - stacklevel=find_stack_level(), - ) - show_counts = null_counts info = DataFrameInfo( data=self, memory_usage=memory_usage, diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py index 96b96f31792cc..5e87db93cf56c 100644 --- a/pandas/io/formats/info.py +++ b/pandas/io/formats/info.py @@ -52,13 +52,6 @@ shows the counts, and False never shows the counts.""" ) -null_counts_sub = dedent( - """ - null_counts : bool, optional - .. 
deprecated:: 1.2.0 - Use show_counts instead.""" -) - frame_examples_sub = dedent( """\ @@ -159,7 +152,6 @@ "type_sub": " and columns", "max_cols_sub": frame_max_cols_sub, "show_counts_sub": show_counts_sub, - "null_counts_sub": null_counts_sub, "examples_sub": frame_examples_sub, "see_also_sub": frame_see_also_sub, "version_added_sub": "", @@ -240,7 +232,6 @@ "type_sub": "", "max_cols_sub": "", "show_counts_sub": show_counts_sub, - "null_counts_sub": "", "examples_sub": series_examples_sub, "see_also_sub": series_see_also_sub, "version_added_sub": "\n.. versionadded:: 1.4.0\n", @@ -280,7 +271,7 @@ at the cost of computational resources. See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. - {show_counts_sub}{null_counts_sub} + {show_counts_sub} Returns ------- diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 6ab45de35fecf..f870ef25991df 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -209,20 +209,6 @@ def test_show_counts(self, row, columns, show_counts, result): df.info(buf=buf, show_counts=show_counts) assert ("non-null" in buf.getvalue()) is result - def test_show_null_counts_deprecation(self): - # GH37999 - df = DataFrame(1, columns=range(10), index=range(10)) - with tm.assert_produces_warning( - FutureWarning, match="null_counts is deprecated.+" - ): - buf = StringIO() - df.info(buf=buf, null_counts=True) - assert "non-null" in buf.getvalue() - - # GH37999 - with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"): - df.info(null_counts=True, show_counts=True) - def test_repr_truncation(self): max_len = 20 with option_context("display.max_colwidth", max_len):
Introduced in https://github.com/pandas-dev/pandas/pull/37999
https://api.github.com/repos/pandas-dev/pandas/pulls/49430
2022-10-31T22:42:37Z
2022-11-01T14:49:24Z
2022-11-01T14:49:24Z
2022-11-01T16:46:00Z
DEPR: Enforce disallowed merging scenarios
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 04e5154ca1a0b..d2bcf8d19651e 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -273,6 +273,9 @@ Removal of prior version deprecations/changes - Enforced disallowing a string column label into ``times`` in :meth:`DataFrame.ewm` (:issue:`43265`) - Enforced disallowing a tuple of column labels into :meth:`.DataFrameGroupBy.__getitem__` (:issue:`30546`) - Enforced disallowing setting values with ``.loc`` using a positional slice. Use ``.loc`` with labels or ``.iloc`` with positions instead (:issue:`31840`) +- Enforced disallowing ``dict`` or ``set`` objects in ``suffixes`` in :func:`merge` (:issue:`34810`) +- Enforced disallowing :func:`merge` to produce duplicated columns through the ``suffixes`` keyword and already existing columns (:issue:`22818`) +- Enforced disallowing using :func:`merge` or :func:`join` on a different number of levels (:issue:`34862`) - Removed setting Categorical._codes directly (:issue:`41429`) - Removed setting Categorical.categories directly (:issue:`47834`) - Removed argument ``inplace`` from :meth:`Categorical.add_categories`, :meth:`Categorical.remove_categories`, :meth:`Categorical.set_categories`, :meth:`Categorical.rename_categories`, :meth:`Categorical.reorder_categories`, :meth:`Categorical.set_ordered`, :meth:`Categorical.as_ordered`, :meth:`Categorical.as_unordered` (:issue:`37981`, :issue:`41118`, :issue:`41133`, :issue:`47834`) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index f198db72460fd..f4332f2c7eb1b 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -676,16 +676,14 @@ def __init__( f"right_index parameter must be of type bool, not {type(right_index)}" ) - # warn user when merging between different levels + # GH 40993: raise when merging between different levels; enforced in 2.0 if _left.columns.nlevels != _right.columns.nlevels: msg = ( - "merging 
between different levels is deprecated and will be removed " - f"in a future version. ({_left.columns.nlevels} levels on the left, " + "Not allowed to merge between different levels. " + f"({_left.columns.nlevels} levels on the left, " f"{_right.columns.nlevels} on the right)" ) - # stacklevel chosen to be correct when this is reached via pd.merge - # (and not DataFrame.join) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + raise MergeError(msg) self.left_on, self.right_on = self._validate_left_right_on(left_on, right_on) @@ -2475,13 +2473,10 @@ def _items_overlap_with_suffix( If corresponding suffix is empty, the entry is simply converted to string. """ - if not is_list_like(suffixes, allow_sets=False): - warnings.warn( - f"Passing 'suffixes' as a {type(suffixes)}, is not supported and may give " - "unexpected results. Provide 'suffixes' as a tuple instead. In the " - "future a 'TypeError' will be raised.", - FutureWarning, - stacklevel=find_stack_level(), + if not is_list_like(suffixes, allow_sets=False) or isinstance(suffixes, dict): + raise TypeError( + f"Passing 'suffixes' as a {type(suffixes)}, is not supported. " + "Provide 'suffixes' as a tuple instead." 
) to_rename = left.intersection(right) @@ -2527,11 +2522,9 @@ def renamer(x, suffix): if not rlabels.is_unique: dups.extend(rlabels[(rlabels.duplicated()) & (~right.duplicated())].tolist()) if dups: - warnings.warn( - f"Passing 'suffixes' which cause duplicate columns {set(dups)} in the " - f"result is deprecated and will raise a MergeError in a future version.", - FutureWarning, - stacklevel=find_stack_level(), + raise MergeError( + f"Passing 'suffixes' which cause duplicate columns {set(dups)} is " + f"not allowed.", ) return llabels, rlabels diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py index 7db26f7eb570b..9081f69d5d2bc 100644 --- a/pandas/tests/frame/methods/test_join.py +++ b/pandas/tests/frame/methods/test_join.py @@ -516,8 +516,9 @@ def test_join_multiindex_dates(self): tm.assert_equal(result, expected) - def test_merge_join_different_levels(self): + def test_merge_join_different_levels_raises(self): # GH#9455 + # GH 40993: For raising, enforced in 2.0 # first dataframe df1 = DataFrame(columns=["a", "b"], data=[[1, 11], [0, 22]]) @@ -527,20 +528,16 @@ def test_merge_join_different_levels(self): df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]]) # merge - columns = ["a", "b", ("c", "c1")] - expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]]) - with tm.assert_produces_warning(FutureWarning): - result = pd.merge(df1, df2, on="a") - tm.assert_frame_equal(result, expected) + with pytest.raises( + MergeError, match="Not allowed to merge between different levels" + ): + pd.merge(df1, df2, on="a") # join, see discussion in GH#12219 - columns = ["a", "b", ("a", ""), ("c", "c1")] - expected = DataFrame(columns=columns, data=[[1, 11, 0, 44], [0, 22, 1, 33]]) - msg = "merging between different levels is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - # stacklevel is chosen to be correct for pd.merge, not DataFrame.join - result = df1.join(df2, on="a") - 
tm.assert_frame_equal(result, expected) + with pytest.raises( + MergeError, match="Not allowed to merge between different levels" + ): + df1.join(df2, on="a") def test_frame_join_tzaware(self): test1 = DataFrame( diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 23d7c91ceefae..dd2c59ec161e7 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -420,18 +420,18 @@ def test_join_inner_multiindex(self, lexsorted_two_level_string_multiindex): # _assert_same_contents(expected, expected2.loc[:, expected.columns]) - def test_join_hierarchical_mixed(self): + def test_join_hierarchical_mixed_raises(self): # GH 2024 + # GH 40993: For raising, enforced in 2.0 df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "c"]) new_df = df.groupby(["a"]).agg({"b": [np.mean, np.sum]}) other_df = DataFrame([(1, 2, 3), (7, 10, 6)], columns=["a", "b", "d"]) other_df.set_index("a", inplace=True) # GH 9455, 12219 - msg = "merging between different levels is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - result = merge(new_df, other_df, left_index=True, right_index=True) - assert ("b", "mean") in result - assert "b" in result + with pytest.raises( + pd.errors.MergeError, match="Not allowed to merge between different levels" + ): + merge(new_df, other_df, left_index=True, right_index=True) def test_join_float64_float32(self): @@ -642,11 +642,12 @@ def test_join_dups(self): dta = x.merge(y, left_index=True, right_index=True).merge( z, left_index=True, right_index=True, how="outer" ) - with tm.assert_produces_warning(FutureWarning): - dta = dta.merge(w, left_index=True, right_index=True) - expected = concat([x, y, z, w], axis=1) - expected.columns = ["x_x", "y_x", "x_y", "y_y", "x_x", "y_x", "x_y", "y_y"] - tm.assert_frame_equal(dta, expected) + # GH 40991: As of 2.0 causes duplicate columns + with pytest.raises( + pd.errors.MergeError, + match="Passing 'suffixes' 
which cause duplicate columns", + ): + dta.merge(w, left_index=True, right_index=True) def test_join_multi_to_multi(self, join_type): # GH 20475 diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index edfae3ad9dac6..780e3003d50d7 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -2207,6 +2207,7 @@ def test_merge_series(on, left_on, right_on, left_index, right_index, nm): def test_merge_series_multilevel(): # GH#47946 + # GH 40993: For raising, enforced in 2.0 a = DataFrame( {"A": [1, 2, 3, 4]}, index=MultiIndex.from_product([["a", "b"], [0, 1]], names=["outer", "inner"]), @@ -2216,13 +2217,10 @@ def test_merge_series_multilevel(): index=MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"]), name=("B", "C"), ) - expected = DataFrame( - {"A": [2, 4], ("B", "C"): [1, 3]}, - index=MultiIndex.from_product([["a", "b"], [1]], names=["outer", "inner"]), - ) - with tm.assert_produces_warning(FutureWarning): - result = merge(a, b, on=["outer", "inner"]) - tm.assert_frame_equal(result, expected) + with pytest.raises( + MergeError, match="Not allowed to merge between different levels" + ): + merge(a, b, on=["outer", "inner"]) @pytest.mark.parametrize( @@ -2303,12 +2301,12 @@ def test_merge_suffix_error(col1, col2, suffixes): @pytest.mark.parametrize("suffixes", [{"left", "right"}, {"left": 0, "right": 0}]) -def test_merge_suffix_warns(suffixes): +def test_merge_suffix_raises(suffixes): a = DataFrame({"a": [1, 2, 3]}) b = DataFrame({"b": [3, 4, 5]}) - with tm.assert_produces_warning(FutureWarning): - merge(a, b, left_index=True, right_index=True, suffixes={"left", "right"}) + with pytest.raises(TypeError, match="Passing 'suffixes' as a"): + merge(a, b, left_index=True, right_index=True, suffixes=suffixes) @pytest.mark.parametrize( @@ -2609,20 +2607,16 @@ def test_merge_result_empty_index_and_on(): tm.assert_frame_equal(result, expected) -def 
test_merge_suffixes_produce_dup_columns_warns(): - # GH#22818 +def test_merge_suffixes_produce_dup_columns_raises(): + # GH#22818; Enforced in 2.0 left = DataFrame({"a": [1, 2, 3], "b": 1, "b_x": 2}) right = DataFrame({"a": [1, 2, 3], "b": 2}) - expected = DataFrame( - [[1, 1, 2, 2], [2, 1, 2, 2], [3, 1, 2, 2]], columns=["a", "b_x", "b_x", "b_y"] - ) - with tm.assert_produces_warning(FutureWarning): - result = merge(left, right, on="a") - tm.assert_frame_equal(result, expected) - with tm.assert_produces_warning(FutureWarning): + with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"): + merge(left, right, on="a") + + with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"): merge(right, left, on="a", suffixes=("_y", "_x")) - tm.assert_frame_equal(result, expected) def test_merge_duplicate_columns_with_suffix_no_warning(): @@ -2635,15 +2629,13 @@ def test_merge_duplicate_columns_with_suffix_no_warning(): tm.assert_frame_equal(result, expected) -def test_merge_duplicate_columns_with_suffix_causing_another_duplicate(): - # GH#22818 +def test_merge_duplicate_columns_with_suffix_causing_another_duplicate_raises(): + # GH#22818, Enforced in 2.0 # This should raise warning because suffixes cause another collision left = DataFrame([[1, 1, 1, 1], [2, 2, 2, 2]], columns=["a", "b", "b", "b_x"]) right = DataFrame({"a": [1, 3], "b": 2}) - with tm.assert_produces_warning(FutureWarning): - result = merge(left, right, on="a") - expected = DataFrame([[1, 1, 1, 1, 2]], columns=["a", "b_x", "b_x", "b_x", "b_y"]) - tm.assert_frame_equal(result, expected) + with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"): + merge(left, right, on="a") def test_merge_string_float_column_result():
Introduced in https://github.com/pandas-dev/pandas/pull/34810, https://github.com/pandas-dev/pandas/pull/40993, https://github.com/pandas-dev/pandas/pull/40991
https://api.github.com/repos/pandas-dev/pandas/pulls/49429
2022-10-31T22:16:36Z
2022-11-01T22:39:38Z
2022-11-01T22:39:38Z
2022-11-01T22:39:41Z
UPGRADE: Autoupdate pre-commit config
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8ff7526b87521..1893f57fc09c1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,16 +18,16 @@ repos: pass_filenames: true require_serial: false - repo: https://github.com/python/black - rev: 22.8.0 + rev: 22.10.0 hooks: - id: black - repo: https://github.com/codespell-project/codespell - rev: v2.2.1 + rev: v2.2.2 hooks: - id: codespell types_or: [python, rst, markdown] - repo: https://github.com/MarcoGorelli/cython-lint - rev: v0.1.8 + rev: v0.2.1 hooks: - id: cython-lint - repo: https://github.com/pre-commit/pre-commit-hooks @@ -60,7 +60,7 @@ repos: - flake8-bugbear==22.7.1 - pandas-dev-flaker==0.5.0 - repo: https://github.com/pycqa/pylint - rev: v2.15.3 + rev: v2.15.5 hooks: - id: pylint - repo: https://github.com/PyCQA/isort @@ -68,7 +68,7 @@ repos: hooks: - id: isort - repo: https://github.com/asottile/pyupgrade - rev: v2.38.2 + rev: v3.2.0 hooks: - id: pyupgrade args: [--py38-plus] @@ -83,7 +83,7 @@ repos: types: [text] # overwrite types: [rst] types_or: [python, rst] - repo: https://github.com/sphinx-contrib/sphinx-lint - rev: v0.6.1 + rev: v0.6.7 hooks: - id: sphinx-lint - repo: https://github.com/asottile/yesqa diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 96c47471aaf90..7b9fe6422544c 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -81,26 +81,48 @@ class Infinity: """ Provide a positive Infinity comparison method for ranking. 
""" - __lt__ = lambda self, other: False - __le__ = lambda self, other: isinstance(other, Infinity) - __eq__ = lambda self, other: isinstance(other, Infinity) - __ne__ = lambda self, other: not isinstance(other, Infinity) - __gt__ = lambda self, other: (not isinstance(other, Infinity) and - not missing.checknull(other)) - __ge__ = lambda self, other: not missing.checknull(other) + def __lt__(self, other): + return False + + def __le__(self, other): + return isinstance(other, Infinity) + + def __eq__(self, other): + return isinstance(other, Infinity) + + def __ne__(self, other): + return not isinstance(other, Infinity) + + def __gt__(self, other): + return (not isinstance(other, Infinity) and + not missing.checknull(other)) + + def __ge__(self, other): + return not missing.checknull(other) class NegInfinity: """ Provide a negative Infinity comparison method for ranking. """ - __lt__ = lambda self, other: (not isinstance(other, NegInfinity) and - not missing.checknull(other)) - __le__ = lambda self, other: not missing.checknull(other) - __eq__ = lambda self, other: isinstance(other, NegInfinity) - __ne__ = lambda self, other: not isinstance(other, NegInfinity) - __gt__ = lambda self, other: False - __ge__ = lambda self, other: isinstance(other, NegInfinity) + def __lt__(self, other): + return (not isinstance(other, NegInfinity) and + not missing.checknull(other)) + + def __le__(self, other): + return not missing.checknull(other) + + def __eq__(self, other): + return isinstance(other, NegInfinity) + + def __ne__(self, other): + return not isinstance(other, NegInfinity) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return isinstance(other, NegInfinity) @cython.wraparound(False) @@ -321,7 +343,7 @@ def kth_smallest(numeric_t[::1] arr, Py_ssize_t k) -> numeric_t: @cython.cdivision(True) def nancorr(const float64_t[:, :] mat, bint cov=False, minp=None): cdef: - Py_ssize_t i, j, xi, yi, N, K + Py_ssize_t i, xi, yi, N, K bint minpv float64_t[:, 
::1] result ndarray[uint8_t, ndim=2] mask @@ -377,7 +399,7 @@ def nancorr(const float64_t[:, :] mat, bint cov=False, minp=None): @cython.wraparound(False) def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1) -> ndarray: cdef: - Py_ssize_t i, j, xi, yi, N, K + Py_ssize_t i, xi, yi, N, K ndarray[float64_t, ndim=2] result ndarray[float64_t, ndim=2] ranked_mat ndarray[float64_t, ndim=1] rankedx, rankedy @@ -746,7 +768,8 @@ def is_monotonic(ndarray[numeric_object_t, ndim=1] arr, bint timelike): n = len(arr) if n == 1: - if arr[0] != arr[0] or (numeric_object_t is int64_t and timelike and arr[0] == NPY_NAT): + if arr[0] != arr[0] or (numeric_object_t is int64_t and timelike and + arr[0] == NPY_NAT): # single value is NaN return False, False, True else: diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index f798655e9d922..a351ad6e461f3 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -265,7 +265,7 @@ def group_cumprod( This method modifies the `out` parameter, rather than returning an object. """ cdef: - Py_ssize_t i, j, N, K, size + Py_ssize_t i, j, N, K int64float_t val, na_val int64float_t[:, ::1] accum intp_t lab @@ -356,7 +356,7 @@ def group_cumsum( This method modifies the `out` parameter, rather than returning an object. 
""" cdef: - Py_ssize_t i, j, N, K, size + Py_ssize_t i, j, N, K int64float_t val, y, t, na_val int64float_t[:, ::1] accum, compensation uint8_t[:, ::1] accum_mask @@ -441,7 +441,7 @@ def group_shift_indexer( int periods, ) -> None: cdef: - Py_ssize_t N, i, j, ii, lab + Py_ssize_t N, i, ii, lab int offset = 0, sign int64_t idxer, idxer_slot int64_t[::1] label_seen = np.zeros(ngroups, dtype=np.int64) @@ -743,8 +743,11 @@ def group_sum( # is otherwise the same as in _treat_as_na if uses_mask: isna_entry = mask[i, j] - elif (sum_t is float32_t or sum_t is float64_t - or sum_t is complex64_t or sum_t is complex64_t): + elif ( + sum_t is float32_t + or sum_t is float64_t + or sum_t is complex64_t + ): # avoid warnings because of equality comparison isna_entry = not val == val elif sum_t is int64_t and is_datetimelike and val == NPY_NAT: @@ -770,8 +773,11 @@ def group_sum( # set a placeholder value in out[i, j]. if uses_mask: result_mask[i, j] = True - elif (sum_t is float32_t or sum_t is float64_t - or sum_t is complex64_t or sum_t is complex64_t): + elif ( + sum_t is float32_t + or sum_t is float64_t + or sum_t is complex64_t + ): out[i, j] = NAN elif sum_t is int64_t: out[i, j] = NPY_NAT @@ -799,7 +805,7 @@ def group_prod( """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - int64float_t val, count + int64float_t val int64float_t[:, ::1] prodx int64_t[:, ::1] nobs Py_ssize_t len_values = len(values), len_labels = len(labels) @@ -872,7 +878,7 @@ def group_var( floating[:, ::1] mean int64_t[:, ::1] nobs Py_ssize_t len_values = len(values), len_labels = len(labels) - bint isna_entry, uses_mask = not mask is None + bint isna_entry, uses_mask = mask is not None assert min_count == -1, "'min_count' only used in sum and prod" @@ -969,7 +975,7 @@ def group_mean( mean_t[:, ::1] sumx, compensation int64_t[:, ::1] nobs Py_ssize_t len_values = len(values), len_labels = len(labels) - bint isna_entry, uses_mask = not mask is None + bint isna_entry, uses_mask = mask is not 
None assert min_count == -1, "'min_count' only used in sum and prod" @@ -1042,10 +1048,10 @@ def group_ohlc( Only aggregates on axis=0 """ cdef: - Py_ssize_t i, j, N, K, lab + Py_ssize_t i, N, K, lab int64float_t val uint8_t[::1] first_element_set - bint isna_entry, uses_mask = not mask is None + bint isna_entry, uses_mask = mask is not None assert min_count == -1, "'min_count' only used in sum and prod" @@ -1240,7 +1246,11 @@ cdef inline bint _treat_as_na(numeric_object_t val, bint is_datetimelike) nogil: return False -cdef numeric_object_t _get_min_or_max(numeric_object_t val, bint compute_max, bint is_datetimelike): +cdef numeric_object_t _get_min_or_max( + numeric_object_t val, + bint compute_max, + bint is_datetimelike, +): """ Find either the min or the max supported by numeric_object_t; 'val' is a placeholder to effectively make numeric_object_t an argument. @@ -1366,7 +1376,10 @@ def group_last( # set a placeholder value in out[i, j]. if uses_mask: result_mask[i, j] = True - elif numeric_object_t is float32_t or numeric_object_t is float64_t: + elif ( + numeric_object_t is float32_t + or numeric_object_t is float64_t + ): out[i, j] = NAN elif numeric_object_t is int64_t: # Per above, this is a placeholder in @@ -1486,7 +1499,10 @@ def group_nth( # it was initialized with np.empty. Also ensures # we can downcast out if appropriate. 
out[i, j] = 0 - elif numeric_object_t is float32_t or numeric_object_t is float64_t: + elif ( + numeric_object_t is float32_t + or numeric_object_t is float64_t + ): out[i, j] = NAN elif numeric_object_t is int64_t: # Per above, this is a placeholder in diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 7f0f91652ae0d..43e33ef3e7d7e 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -133,7 +133,7 @@ cdef class BlockPlacement: @property def as_array(self) -> np.ndarray: cdef: - Py_ssize_t start, stop, end, _ + Py_ssize_t start, stop, _ if not self._has_array: start, stop, step, _ = slice_get_indices_ex(self._as_slice) @@ -259,7 +259,6 @@ cdef class BlockPlacement: """ cdef: slice slc = self._ensure_has_slice() - slice new_slice ndarray[intp_t, ndim=1] new_placement if slc is not None and slc.step == 1: @@ -678,7 +677,14 @@ cdef class BlockManager: public list refs public object parent - def __cinit__(self, blocks=None, axes=None, refs=None, parent=None, verify_integrity=True): + def __cinit__( + self, + blocks=None, + axes=None, + refs=None, + parent=None, + verify_integrity=True, + ): # None as defaults for unpickling GH#42345 if blocks is None: # This adds 1-2 microseconds to DataFrame(np.array([])) diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index e574aa10f6b57..667eda1b1f1da 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -275,7 +275,7 @@ def left_join_indexer_unique( cdef: Py_ssize_t i, j, nleft, nright ndarray[intp_t] indexer - numeric_object_t lval, rval + numeric_object_t rval i = 0 j = 0 @@ -324,7 +324,7 @@ def left_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t] is non-unique (if both were unique we'd use left_join_indexer_unique). 
""" cdef: - Py_ssize_t i, j, k, nright, nleft, count + Py_ssize_t i, j, nright, nleft, count numeric_object_t lval, rval ndarray[intp_t] lindexer, rindexer ndarray[numeric_object_t] result @@ -434,7 +434,7 @@ def inner_join_indexer(ndarray[numeric_object_t] left, ndarray[numeric_object_t] Both left and right are monotonic increasing but not necessarily unique. """ cdef: - Py_ssize_t i, j, k, nright, nleft, count + Py_ssize_t i, j, nright, nleft, count numeric_object_t lval, rval ndarray[intp_t] lindexer, rindexer ndarray[numeric_object_t] result diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 56fdbfccacc55..1b871bf0b745f 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -621,6 +621,8 @@ ctypedef fused ndarr_object: # TODO: get rid of this in StringArray and modify # and go through ensure_string_array instead + + @cython.wraparound(False) @cython.boundscheck(False) def convert_nans_to_NA(ndarr_object arr) -> ndarray: @@ -765,9 +767,9 @@ def generate_bins_dt64(ndarray[int64_t, ndim=1] values, const int64_t[:] binner, Int64 (datetime64) version of generic python version in ``groupby.py``. """ cdef: - Py_ssize_t lenidx, lenbin, i, j, bc, vc + Py_ssize_t lenidx, lenbin, i, j, bc ndarray[int64_t, ndim=1] bins - int64_t l_bin, r_bin, nat_count + int64_t r_bin, nat_count bint right_closed = closed == 'right' nat_count = 0 @@ -2215,14 +2217,24 @@ def maybe_convert_numeric( # Otherwise, iterate and do full inference. 
cdef: - int status, maybe_int + int maybe_int Py_ssize_t i, n = values.size Seen seen = Seen(coerce_numeric) - ndarray[float64_t, ndim=1] floats = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_FLOAT64, 0) - ndarray[complex128_t, ndim=1] complexes = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_COMPLEX128, 0) - ndarray[int64_t, ndim=1] ints = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_INT64, 0) - ndarray[uint64_t, ndim=1] uints = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_UINT64, 0) - ndarray[uint8_t, ndim=1] bools = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_UINT8, 0) + ndarray[float64_t, ndim=1] floats = cnp.PyArray_EMPTY( + 1, values.shape, cnp.NPY_FLOAT64, 0 + ) + ndarray[complex128_t, ndim=1] complexes = cnp.PyArray_EMPTY( + 1, values.shape, cnp.NPY_COMPLEX128, 0 + ) + ndarray[int64_t, ndim=1] ints = cnp.PyArray_EMPTY( + 1, values.shape, cnp.NPY_INT64, 0 + ) + ndarray[uint64_t, ndim=1] uints = cnp.PyArray_EMPTY( + 1, values.shape, cnp.NPY_UINT64, 0 + ) + ndarray[uint8_t, ndim=1] bools = cnp.PyArray_EMPTY( + 1, values.shape, cnp.NPY_UINT8, 0 + ) ndarray[uint8_t, ndim=1] mask = np.zeros(n, dtype="u1") float64_t fval bint allow_null_in_int = convert_to_masked_nullable @@ -2301,7 +2313,7 @@ def maybe_convert_numeric( seen.float_ = True else: try: - status = floatify(val, &fval, &maybe_int) + floatify(val, &fval, &maybe_int) if fval in na_values: seen.saw_null() @@ -2440,7 +2452,7 @@ def maybe_convert_objects(ndarray[object] objects, int64_t[::1] itimedeltas Seen seen = Seen() object val - float64_t fval, fnan = np.nan + float64_t fnan = np.nan n = len(objects) @@ -2925,7 +2937,7 @@ def to_object_array(rows: object, min_width: int = 0) -> ndarray: def tuples_to_object_array(ndarray[object] tuples): cdef: - Py_ssize_t i, j, n, k, tmp + Py_ssize_t i, j, n, k ndarray[object, ndim=2] result tuple tup @@ -3053,7 +3065,9 @@ cpdef ndarray eq_NA_compat(ndarray[object] arr, object key): key is assumed to have `not isna(key)` """ cdef: - ndarray[uint8_t, cast=True] result = 
cnp.PyArray_EMPTY(arr.ndim, arr.shape, cnp.NPY_BOOL, 0) + ndarray[uint8_t, cast=True] result = cnp.PyArray_EMPTY( + arr.ndim, arr.shape, cnp.NPY_BOOL, 0 + ) Py_ssize_t i object item diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx index 679cde9932a7a..b7457f94f3447 100644 --- a/pandas/_libs/testing.pyx +++ b/pandas/_libs/testing.pyx @@ -161,13 +161,17 @@ cpdef assert_almost_equal(a, b, is_unequal = True diff += 1 if not first_diff: - first_diff = f"At positional index {i}, first diff: {a[i]} != {b[i]}" + first_diff = ( + f"At positional index {i}, first diff: {a[i]} != {b[i]}" + ) if is_unequal: from pandas._testing import raise_assert_detail msg = (f"{obj} values are different " f"({np.round(diff * 100.0 / na, 5)} %)") - raise_assert_detail(obj, msg, lobj, robj, first_diff=first_diff, index_values=index_values) + raise_assert_detail( + obj, msg, lobj, robj, first_diff=first_diff, index_values=index_values + ) return True diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index bf912005ae57e..6d6e90673f030 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -261,7 +261,7 @@ def array_with_unit_to_datetime( tz : parsed timezone offset or None """ cdef: - Py_ssize_t i, j, n=len(values) + Py_ssize_t i, n=len(values) int64_t mult int prec = 0 ndarray[float64_t] fvalues @@ -418,6 +418,7 @@ def array_with_unit_to_datetime( return oresult, tz + @cython.wraparound(False) @cython.boundscheck(False) def first_non_null(values: ndarray) -> int: @@ -425,7 +426,6 @@ def first_non_null(values: ndarray) -> int: cdef: Py_ssize_t n = len(values) Py_ssize_t i - int result for i in range(n): val = values[i] if checknull_with_nat_and_na(val): @@ -436,6 +436,7 @@ def first_non_null(values: ndarray) -> int: else: return -1 + @cython.wraparound(False) @cython.boundscheck(False) cpdef array_to_datetime( @@ -610,7 +611,8 @@ cpdef array_to_datetime( continue elif is_raise: raise ValueError( - f"time data \"{val}\" at position {i} doesn't match format 
specified" + f"time data \"{val}\" at position {i} doesn't " + "match format specified" ) return values, tz_out @@ -626,7 +628,10 @@ cpdef array_to_datetime( if is_coerce: iresult[i] = NPY_NAT continue - raise TypeError(f"invalid string coercion to datetime for \"{val}\" at position {i}") + raise TypeError( + f"invalid string coercion to datetime for \"{val}\" " + f"at position {i}" + ) if tz is not None: seen_datetime_offset = True diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx index 94781374296fa..357227de2fc2c 100644 --- a/pandas/_libs/tslibs/dtypes.pyx +++ b/pandas/_libs/tslibs/dtypes.pyx @@ -396,7 +396,9 @@ cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) nogil: # TODO: use in _matplotlib.converter? -cpdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns) except? -1: +cpdef int64_t periods_per_day( + NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns +) except? -1: """ How many of the given time units fit into a single day? """ diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 3c7406d231241..dda26ad3bebc6 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -325,7 +325,11 @@ def get_start_end_field( @cython.wraparound(False) @cython.boundscheck(False) -def get_date_field(const int64_t[:] dtindex, str field, NPY_DATETIMEUNIT reso=NPY_FR_ns): +def get_date_field( + const int64_t[:] dtindex, + str field, + NPY_DATETIMEUNIT reso=NPY_FR_ns, +): """ Given a int64-based datetime index, extract the year, month, etc., field and return an array of these values. 
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 79299ec38e19c..26cd332c3007a 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -204,9 +204,10 @@ cdef class _NaT(datetime): return result # __rsub__ logic here - # TODO(cython3): remove this, move above code out of ``if not is_rsub`` block + # TODO(cython3): remove this, move above code out of + # ``if not is_rsub`` block # timedelta64 - NaT we have to treat NaT as timedelta64 - # for this to be meaningful, and the result is timedelta64 + # for this to be meaningful, and the result is timedelta64 result = np.empty(other.shape, dtype="timedelta64[ns]") result.fill("NaT") return result @@ -240,7 +241,8 @@ cdef class _NaT(datetime): result = np.empty(other.shape, dtype="timedelta64[ns]") result.fill("NaT") return result - # other cases are same, swap operands is allowed even though we subtract because this is NaT + # other cases are same, swap operands is allowed even though we subtract + # because this is NaT return self.__sub__(other) def __pos__(self): @@ -1201,6 +1203,7 @@ default 'raise' NaT """, ) + @property def tz(self) -> None: return None diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 07872050dc822..b1ff456c84a70 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -46,7 +46,7 @@ cdef extern from "src/datetime/np_datetime.h": npy_datetimestruct _S_MIN_DTS, _S_MAX_DTS npy_datetimestruct _M_MIN_DTS, _M_MAX_DTS - PyArray_DatetimeMetaData get_datetime_metadata_from_dtype(cnp.PyArray_Descr *dtype); + PyArray_DatetimeMetaData get_datetime_metadata_from_dtype(cnp.PyArray_Descr *dtype) cdef extern from "src/datetime/np_datetime_strings.h": int parse_iso_8601_datetime(const char *str, int len, int want_exc, @@ -171,7 +171,11 @@ class OutOfBoundsTimedelta(ValueError): pass -cdef get_implementation_bounds(NPY_DATETIMEUNIT reso, npy_datetimestruct *lower, 
npy_datetimestruct *upper): +cdef get_implementation_bounds( + NPY_DATETIMEUNIT reso, + npy_datetimestruct *lower, + npy_datetimestruct *upper, +): if reso == NPY_FR_ns: upper[0] = _NS_MAX_DTS lower[0] = _NS_MIN_DTS @@ -420,7 +424,6 @@ def compare_mismatched_resolutions(ndarray left, ndarray right, op): Py_ssize_t i, N = left.size npy_datetimestruct ldts, rdts - for i in range(N): # Analogous to: lval = lvalues[i] lval = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 1))[0] @@ -511,7 +514,10 @@ cdef ndarray astype_round_check( @cython.overflowcheck(True) -cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1: +cdef int64_t get_conversion_factor( + NPY_DATETIMEUNIT from_unit, + NPY_DATETIMEUNIT to_unit +) except? -1: """ Find the factor by which we need to multiply to convert from from_unit to to_unit. """ diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index bbb17d8a2bbcf..50d6a0a02b0cf 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -2262,7 +2262,12 @@ cdef class QuarterOffset(SingleConstructorOffset): def _apply_array(self, dtarr): reso = get_unit_from_dtype(dtarr.dtype) shifted = shift_quarters( - dtarr.view("i8"), self.n, self.startingMonth, self._day_opt, modby=3, reso=reso + dtarr.view("i8"), + self.n, + self.startingMonth, + self._day_opt, + modby=3, + reso=reso, ) return shifted @@ -2542,7 +2547,9 @@ cdef class SemiMonthOffset(SingleConstructorOffset): ndarray i8other = dtarr.view("i8") Py_ssize_t i, count = dtarr.size int64_t val, res_val - ndarray out = cnp.PyArray_EMPTY(i8other.ndim, i8other.shape, cnp.NPY_INT64, 0) + ndarray out = cnp.PyArray_EMPTY( + i8other.ndim, i8other.shape, cnp.NPY_INT64, 0 + ) npy_datetimestruct dts int months, to_day, nadj, n = self.n int days_in_month, day, anchor_dom = self.day_of_month @@ -2750,7 +2757,9 @@ cdef class Week(SingleConstructorOffset): cdef: Py_ssize_t i, count = i8other.size int64_t val, res_val - 
ndarray out = cnp.PyArray_EMPTY(i8other.ndim, i8other.shape, cnp.NPY_INT64, 0) + ndarray out = cnp.PyArray_EMPTY( + i8other.ndim, i8other.shape, cnp.NPY_INT64, 0 + ) npy_datetimestruct dts int wday, days, weeks, n = self.n int anchor_weekday = self.weekday @@ -3322,7 +3331,9 @@ cdef class FY5253Quarter(FY5253Mixin): for qlen in qtr_lens: if qlen * 7 <= tdelta.days: num_qtrs += 1 - tdelta -= (<_Timedelta>Timedelta(days=qlen * 7))._as_creso(norm._creso) + tdelta -= ( + <_Timedelta>Timedelta(days=qlen * 7) + )._as_creso(norm._creso) else: break else: @@ -4139,7 +4150,9 @@ cdef ndarray _shift_bdays( """ cdef: Py_ssize_t i, n = i8other.size - ndarray result = cnp.PyArray_EMPTY(i8other.ndim, i8other.shape, cnp.NPY_INT64, 0) + ndarray result = cnp.PyArray_EMPTY( + i8other.ndim, i8other.shape, cnp.NPY_INT64, 0 + ) int64_t val, res_val int wday, nadj, days npy_datetimestruct dts diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 1312124cfb77b..6f5b1e5b4e799 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -418,7 +418,9 @@ cdef parse_datetime_string_with_reso( from pandas import Timestamp parsed = Timestamp(date_string) else: - parsed = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us) + parsed = datetime( + dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us + ) reso = { NPY_DATETIMEUNIT.NPY_FR_Y: "year", NPY_DATETIMEUNIT.NPY_FR_M: "month", @@ -717,7 +719,8 @@ def try_parse_dates( date = datetime.now() default = datetime(date.year, date.month, 1) - parse_date = lambda x: du_parse(x, dayfirst=dayfirst, default=default) + def parse_date(x): + return du_parse(x, dayfirst=dayfirst, default=default) # EAFP here try: @@ -1050,6 +1053,7 @@ def guess_datetime_format(dt_str: str, bint dayfirst=False) -> str | None: else: return None + cdef str _fill_token(token: str, padding: int): cdef str token_filled if '.' 
not in token: @@ -1064,6 +1068,7 @@ cdef str _fill_token(token: str, padding: int): token_filled = f'{seconds}.{nanoseconds}' return token_filled + @cython.wraparound(False) @cython.boundscheck(False) cdef inline object convert_to_unicode(object item, bint keep_trivial_numbers): diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index be6f87791284e..0e7cfa4dd9670 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1053,7 +1053,9 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end): cdef: Py_ssize_t n = len(arr) Py_ssize_t increment = arr.strides[0] // 8 - ndarray[int64_t] result = cnp.PyArray_EMPTY(arr.ndim, arr.shape, cnp.NPY_INT64, 0) + ndarray[int64_t] result = cnp.PyArray_EMPTY( + arr.ndim, arr.shape, cnp.NPY_INT64, 0 + ) _period_asfreq( <int64_t*>cnp.PyArray_DATA(arr), @@ -1362,7 +1364,6 @@ def get_period_field_arr(str field, const int64_t[:] arr, int freq): cdef: Py_ssize_t i, sz int64_t[::1] out - accessor f func = _get_accessor_func(field) if func is NULL: @@ -1438,7 +1439,9 @@ def extract_ordinals(ndarray values, freq) -> np.ndarray: cdef: Py_ssize_t i, n = values.size int64_t ordinal - ndarray ordinals = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_INT64, 0) + ndarray ordinals = cnp.PyArray_EMPTY( + values.ndim, values.shape, cnp.NPY_INT64, 0 + ) cnp.broadcast mi = cnp.PyArray_MultiIterNew2(ordinals, values) object p @@ -1684,7 +1687,10 @@ cdef class _Period(PeriodMixin): raise IncompatibleFrequency("Input cannot be converted to " f"Period(freq={self.freqstr})") - if util.is_timedelta64_object(other) and get_timedelta64_value(other) == NPY_NAT: + if ( + util.is_timedelta64_object(other) and + get_timedelta64_value(other) == NPY_NAT + ): # i.e. np.timedelta64("nat") return NaT @@ -2478,7 +2484,8 @@ class Period(_Period): the start or the end of the period, but rather the entire period itself. 
freq : str, default None One of pandas period strings or corresponding objects. Accepted - strings are listed in the :ref:`offset alias section <timeseries.offset_aliases>` in the user docs. + strings are listed in the + :ref:`offset alias section <timeseries.offset_aliases>` in the user docs. ordinal : int, default None The period offset from the proleptic Gregorian epoch. year : int, default None @@ -2511,7 +2518,6 @@ class Period(_Period): # ('T', 5) but may be passed in as a string like '5T' # ordinal is the period offset from the gregorian proleptic epoch - cdef _Period self if freq is not None: freq = cls._maybe_convert_freq(freq) diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 6287c2fbc5d34..f540ad19c48d2 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -75,7 +75,6 @@ def array_strptime(ndarray[object] values, str fmt, bint exact=True, errors='rai int iso_week, iso_year int64_t us, ns object val, group_key, ampm, found, timezone - dict found_key bint is_raise = errors=='raise' bint is_ignore = errors=='ignore' bint is_coerce = errors=='coerce' diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index f3de67b705d4d..a96ec8c2ab80a 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -176,7 +176,9 @@ def ints_to_pytimedelta(ndarray m8values, box=False): # `it` iterates C-order as well, so the iteration matches # See discussion at # github.com/pandas-dev/pandas/pull/46886#discussion_r860261305 - ndarray result = cnp.PyArray_EMPTY(m8values.ndim, m8values.shape, cnp.NPY_OBJECT, 0) + ndarray result = cnp.PyArray_EMPTY( + m8values.ndim, m8values.shape, cnp.NPY_OBJECT, 0 + ) object[::1] res_flat = result.ravel() # should NOT be a copy ndarray arr = m8values.view("i8") @@ -468,7 +470,11 @@ cdef inline int64_t _item_to_timedelta64_fastpath(object item) except? 
-1: return parse_timedelta_string(item) -cdef inline int64_t _item_to_timedelta64(object item, str parsed_unit, str errors) except? -1: +cdef inline int64_t _item_to_timedelta64( + object item, + str parsed_unit, + str errors +) except? -1: """ See array_to_timedelta64. """ @@ -967,7 +973,6 @@ cdef _timedelta_from_value_and_reso(int64_t value, NPY_DATETIMEUNIT reso): "Only resolutions 's', 'ms', 'us', 'ns' are supported." ) - td_base.value = value td_base._is_populated = 0 td_base._creso = reso @@ -1570,8 +1575,6 @@ class Timedelta(_Timedelta): "milliseconds", "microseconds", "nanoseconds"} def __new__(cls, object value=_no_input, unit=None, **kwargs): - cdef _Timedelta td_base - if value is _no_input: if not len(kwargs): raise ValueError("cannot construct a Timedelta without a " @@ -1625,7 +1628,8 @@ class Timedelta(_Timedelta): if len(kwargs): # GH#48898 raise ValueError( - "Cannot pass both a Timedelta input and timedelta keyword arguments, got " + "Cannot pass both a Timedelta input and timedelta keyword " + "arguments, got " f"{list(kwargs.keys())}" ) return value @@ -1712,7 +1716,7 @@ class Timedelta(_Timedelta): @cython.cdivision(True) def _round(self, freq, mode): cdef: - int64_t result, unit, remainder + int64_t result, unit ndarray[int64_t] arr from pandas._libs.tslibs.offsets import to_offset @@ -1801,9 +1805,6 @@ class Timedelta(_Timedelta): __rmul__ = __mul__ def __truediv__(self, other): - cdef: - int64_t new_value - if _should_cast_to_timedelta(other): # We interpret NaT as timedelta64("NaT") other = Timedelta(other) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index f80cb2e27cc23..afb93e34935f0 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -267,7 +267,6 @@ cdef class _Timestamp(ABCTimestamp): @classmethod def _from_value_and_reso(cls, int64_t value, NPY_DATETIMEUNIT reso, tzinfo tz): cdef: - npy_datetimestruct dts _TSObject obj = _TSObject() if value == NPY_NAT: @@ 
-294,7 +293,6 @@ cdef class _Timestamp(ABCTimestamp): # This is herely mainly so we can incrementally implement non-nano # (e.g. only tznaive at first) cdef: - npy_datetimestruct dts int64_t value NPY_DATETIMEUNIT reso @@ -317,7 +315,6 @@ cdef class _Timestamp(ABCTimestamp): def __richcmp__(_Timestamp self, object other, int op): cdef: _Timestamp ots - int ndim if isinstance(other, _Timestamp): ots = other @@ -368,7 +365,8 @@ cdef class _Timestamp(ABCTimestamp): return False elif op == Py_NE: return True - raise TypeError("Cannot compare Timestamp with datetime.date. " + raise TypeError( + "Cannot compare Timestamp with datetime.date. " "Use ts == pd.Timestamp(date) or ts.date() == date instead." ) else: @@ -1646,7 +1644,9 @@ class Timestamp(_Timestamp): if not is_offset_object(freq): freq = to_offset(freq) - return create_timestamp_from_ts(ts.value, ts.dts, ts.tzinfo, freq, ts.fold, ts.creso) + return create_timestamp_from_ts( + ts.value, ts.dts, ts.tzinfo, freq, ts.fold, ts.creso + ) def _round(self, freq, mode, ambiguous='raise', nonexistent='raise'): cdef: diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index e2812178a2b43..28259c9db26e5 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -224,14 +224,13 @@ timedelta-like} """ cdef: ndarray[uint8_t, cast=True] ambiguous_array - Py_ssize_t i, idx, pos, n = vals.shape[0] - Py_ssize_t delta_idx_offset, delta_idx, pos_left, pos_right + Py_ssize_t i, n = vals.shape[0] + Py_ssize_t delta_idx_offset, delta_idx int64_t v, left, right, val, new_local, remaining_mins int64_t first_delta, delta int64_t shift_delta = 0 ndarray[int64_t] result_a, result_b, dst_hours int64_t[::1] result - npy_datetimestruct dts bint infer_dst = False, is_dst = False, fill = False bint shift_forward = False, shift_backward = False bint fill_nonexist = False diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx index 
6a6b156af3dc4..c828a9dfe0ccb 100644 --- a/pandas/_libs/tslibs/vectorized.pyx +++ b/pandas/_libs/tslibs/vectorized.pyx @@ -138,7 +138,7 @@ def ints_to_pydatetime( npy_datetimestruct dts tzinfo new_tz - bint use_date = False, use_time = False, use_ts = False, use_pydt = False + bint use_date = False, use_ts = False, use_pydt = False object res_val # Note that `result` (and thus `result_flat`) is C-order and @@ -154,11 +154,9 @@ def ints_to_pydatetime( use_date = True elif box == "timestamp": use_ts = True - elif box == "time": - use_time = True elif box == "datetime": use_pydt = True - else: + elif box != "time": raise ValueError( "box must be one of 'datetime', 'date', 'time' or 'timestamp'" ) diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index 68c05f2bb2c98..702706f00455b 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -172,7 +172,9 @@ def roll_sum(const float64_t[:] values, ndarray[int64_t] start, add_sum(values[j], &nobs, &sum_x, &compensation_add, &num_consecutive_same_value, &prev_value) - output[i] = calc_sum(minp, nobs, sum_x, num_consecutive_same_value, prev_value) + output[i] = calc_sum( + minp, nobs, sum_x, num_consecutive_same_value, prev_value + ) if not is_monotonic_increasing_bounds: nobs = 0 @@ -209,9 +211,15 @@ cdef inline float64_t calc_mean(int64_t minp, Py_ssize_t nobs, Py_ssize_t neg_ct return result -cdef inline void add_mean(float64_t val, Py_ssize_t *nobs, float64_t *sum_x, - Py_ssize_t *neg_ct, float64_t *compensation, - int64_t *num_consecutive_same_value, float64_t *prev_value) nogil: +cdef inline void add_mean( + float64_t val, + Py_ssize_t *nobs, + float64_t *sum_x, + Py_ssize_t *neg_ct, + float64_t *compensation, + int64_t *num_consecutive_same_value, + float64_t *prev_value +) nogil: """ add a value from the mean calc using Kahan summation """ cdef: float64_t y, t @@ -296,7 +304,9 @@ def roll_mean(const float64_t[:] values, ndarray[int64_t] start, 
add_mean(val, &nobs, &sum_x, &neg_ct, &compensation_add, &num_consecutive_same_value, &prev_value) - output[i] = calc_mean(minp, nobs, neg_ct, sum_x, num_consecutive_same_value, prev_value) + output[i] = calc_mean( + minp, nobs, neg_ct, sum_x, num_consecutive_same_value, prev_value + ) if not is_monotonic_increasing_bounds: nobs = 0 @@ -309,8 +319,13 @@ def roll_mean(const float64_t[:] values, ndarray[int64_t] start, # Rolling variance -cdef inline float64_t calc_var(int64_t minp, int ddof, float64_t nobs, - float64_t ssqdm_x, int64_t num_consecutive_same_value) nogil: +cdef inline float64_t calc_var( + int64_t minp, + int ddof, + float64_t nobs, + float64_t ssqdm_x, + int64_t num_consecutive_same_value +) nogil: cdef: float64_t result @@ -328,9 +343,15 @@ cdef inline float64_t calc_var(int64_t minp, int ddof, float64_t nobs, return result -cdef inline void add_var(float64_t val, float64_t *nobs, float64_t *mean_x, - float64_t *ssqdm_x, float64_t *compensation, - int64_t *num_consecutive_same_value, float64_t *prev_value) nogil: +cdef inline void add_var( + float64_t val, + float64_t *nobs, + float64_t *mean_x, + float64_t *ssqdm_x, + float64_t *compensation, + int64_t *num_consecutive_same_value, + float64_t *prev_value, +) nogil: """ add a value from the var calc """ cdef: float64_t delta, prev_mean, y, t @@ -364,8 +385,13 @@ cdef inline void add_var(float64_t val, float64_t *nobs, float64_t *mean_x, ssqdm_x[0] = ssqdm_x[0] + (val - prev_mean) * (val - mean_x[0]) -cdef inline void remove_var(float64_t val, float64_t *nobs, float64_t *mean_x, - float64_t *ssqdm_x, float64_t *compensation) nogil: +cdef inline void remove_var( + float64_t val, + float64_t *nobs, + float64_t *mean_x, + float64_t *ssqdm_x, + float64_t *compensation +) nogil: """ remove a value from the var calc """ cdef: float64_t delta, prev_mean, y, t @@ -566,7 +592,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp) -> np.ndarray: cdef: 
Py_ssize_t i, j - float64_t val, prev, min_val, mean_val, sum_val = 0 + float64_t val, min_val, mean_val, sum_val = 0 float64_t compensation_xxx_add, compensation_xxx_remove float64_t compensation_xx_add, compensation_xx_remove float64_t compensation_x_add, compensation_x_remove @@ -574,7 +600,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start, float64_t prev_value int64_t nobs = 0, N = len(start), V = len(values), nobs_mean = 0 int64_t s, e, num_consecutive_same_value - ndarray[float64_t] output, mean_array, values_copy + ndarray[float64_t] output, values_copy bint is_monotonic_increasing_bounds minp = max(minp, 3) @@ -779,7 +805,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp) -> np.ndarray: cdef: Py_ssize_t i, j - float64_t val, prev, mean_val, min_val, sum_val = 0 + float64_t val, mean_val, min_val, sum_val = 0 float64_t compensation_xxxx_add, compensation_xxxx_remove float64_t compensation_xxx_remove, compensation_xxx_add float64_t compensation_xx_remove, compensation_xx_add @@ -853,7 +879,8 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start, &compensation_xxx_add, &compensation_xxxx_add, &num_consecutive_same_value, &prev_value) - output[i] = calc_kurt(minp, nobs, x, xx, xxx, xxxx, num_consecutive_same_value) + output[i] = calc_kurt(minp, nobs, x, xx, xxx, xxxx, + num_consecutive_same_value) if not is_monotonic_increasing_bounds: nobs = 0 @@ -876,7 +903,7 @@ def roll_median_c(const float64_t[:] values, ndarray[int64_t] start, bint err = False, is_monotonic_increasing_bounds int midpoint, ret = 0 int64_t nobs = 0, N = len(start), s, e, win - float64_t val, res, prev + float64_t val, res skiplist_t *sl ndarray[float64_t] output @@ -1149,7 +1176,7 @@ def roll_quantile(const float64_t[:] values, ndarray[int64_t] start, Py_ssize_t i, j, s, e, N = len(start), idx int ret = 0 int64_t nobs = 0, win - float64_t val, prev, midpoint, idx_with_fraction + float64_t val, 
idx_with_fraction float64_t vlow, vhigh skiplist_t *skiplist InterpolationType interpolation_type @@ -1275,7 +1302,7 @@ def roll_rank(const float64_t[:] values, ndarray[int64_t] start, derived from roll_quantile """ cdef: - Py_ssize_t i, j, s, e, N = len(start), idx + Py_ssize_t i, j, s, e, N = len(start) float64_t rank_min = 0, rank = 0 int64_t nobs = 0, win float64_t val diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index 3e4f116953cb3..89ac1c10254cb 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -283,7 +283,7 @@ class SettingWithCopyError(ValueError): The ``mode.chained_assignment`` needs to be set to set to 'raise.' This can happen unintentionally when chained indexing. - For more information on eveluation order, + For more information on evaluation order, see :ref:`the user guide<indexing.evaluation_order>`. For more information on view vs. copy, @@ -306,7 +306,7 @@ class SettingWithCopyWarning(Warning): 'Warn' is the default option. This can happen unintentionally when chained indexing. - For more information on eveluation order, + For more information on evaluation order, see :ref:`the user guide<indexing.evaluation_order>`. For more information on view vs. copy, diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py index 85fae6da07827..78289174b7e68 100644 --- a/pandas/io/clipboard/__init__.py +++ b/pandas/io/clipboard/__init__.py @@ -282,7 +282,7 @@ def copy_dev_clipboard(text): stacklevel=find_stack_level(), ) - with open("/dev/clipboard", "wt") as fd: + with open("/dev/clipboard", "w") as fd: fd.write(text) def paste_dev_clipboard() -> str: diff --git a/pandas/io/sas/byteswap.pyx b/pandas/io/sas/byteswap.pyx index 4620403910274..2a4d3f66a5d7d 100644 --- a/pandas/io/sas/byteswap.pyx +++ b/pandas/io/sas/byteswap.pyx @@ -1,5 +1,6 @@ """ -The following are faster versions of struct.unpack that avoid the overhead of Python function calls. 
+The following are faster versions of struct.unpack that avoid the overhead of Python +function calls. In the SAS7BDAT parser, they may be called up to (n_rows * n_cols) times. """ diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx index b9897434666ef..8c13566c656b7 100644 --- a/pandas/io/sas/sas.pyx +++ b/pandas/io/sas/sas.pyx @@ -254,8 +254,16 @@ cdef: def _init_subheader_signatures(): - subheaders_32bit = [(sig, idx) for sig, idx in const.subheader_signature_to_index.items() if len(sig) == 4] - subheaders_64bit = [(sig, idx) for sig, idx in const.subheader_signature_to_index.items() if len(sig) == 8] + subheaders_32bit = [ + (sig, idx) + for sig, idx in const.subheader_signature_to_index.items() + if len(sig) == 4 + ] + subheaders_64bit = [ + (sig, idx) + for sig, idx in const.subheader_signature_to_index.items() + if len(sig) == 8 + ] assert len(subheaders_32bit) == 13 assert len(subheaders_64bit) == 17 assert len(const.subheader_signature_to_index) == 13 + 17 @@ -491,7 +499,8 @@ cdef class Parser: rpos = self.decompress(source, decompressed_source) if rpos != self.row_length: raise ValueError( - f"Expected decompressed line of length {self.row_length} bytes but decompressed {rpos} bytes" + f"Expected decompressed line of length {self.row_length} bytes " + f"but decompressed {rpos} bytes" ) source = decompressed_source
closes #49406
https://api.github.com/repos/pandas-dev/pandas/pulls/49428
2022-10-31T21:50:42Z
2022-11-04T10:17:35Z
2022-11-04T10:17:35Z
2022-11-04T10:17:36Z
enable pylint wildcard-import
diff --git a/pyproject.toml b/pyproject.toml index ddecebaec7c72..4c86fa4b0589a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -185,8 +185,7 @@ disable = [ "unused-wildcard-import", "using-constant-test", "useless-else-on-loop", - "useless-parent-delegation", - "wildcard-import" + "useless-parent-delegation" ] [tool.pytest.ini_options]
Issue #48855. This PR enables pylint type "W" warning: `wildcard-import`. - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/49427
2022-10-31T20:21:49Z
2022-11-01T12:09:13Z
2022-11-01T12:09:13Z
2022-11-01T12:09:13Z
CLN: assorted; silence test warnings
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 37b87f92971cc..bbb17d8a2bbcf 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1058,12 +1058,6 @@ cdef class Tick(SingleConstructorOffset): if util.is_timedelta64_object(other) or PyDelta_Check(other): return other + self.delta - elif isinstance(other, type(self)): - # TODO(2.0): remove once apply deprecation is enforced. - # This is reached in tests that specifically call apply, - # but should not be reached "naturally" because __add__ should - # catch this case first. - return type(self)(self.n + other.n) raise ApplyTypeError(f"Unhandled type: {type(other).__name__}") diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index ca54ab163ab64..ab62e60a26407 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -292,6 +292,7 @@ def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False): def _from_sequence_not_strict( cls, data, + *, dtype=None, copy: bool = False, tz=lib.no_default, @@ -300,6 +301,9 @@ def _from_sequence_not_strict( yearfirst: bool = False, ambiguous: TimeAmbiguous = "raise", ): + """ + A non-strict version of _from_sequence, called from DatetimeIndex.__new__. + """ explicit_none = freq is None freq = freq if freq is not lib.no_default else None freq, freq_infer = dtl.maybe_infer_freq(freq) @@ -1976,7 +1980,6 @@ def _sequence_to_dt64ns( yearfirst: bool = False, ambiguous: TimeAmbiguous = "raise", allow_mixed: bool = False, - require_iso8601: bool = False, ): """ Parameters @@ -1990,8 +1993,6 @@ def _sequence_to_dt64ns( See pandas._libs.tslibs.tzconversion.tz_localize_to_utc. allow_mixed : bool, default False Interpret integers as timestamps when datetime objects are also present. - require_iso8601 : bool, default False - Only consider ISO-8601 formats when parsing strings. 
Returns ------- @@ -2038,7 +2039,6 @@ def _sequence_to_dt64ns( yearfirst=yearfirst, allow_object=False, allow_mixed=allow_mixed, - require_iso8601=require_iso8601, ) if tz and inferred_tz: # two timezones: convert to intended from base UTC repr diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 92b9222cfc9bc..5bb4ae94d1849 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -215,11 +215,15 @@ def _from_sequence(cls, data, *, dtype=None, copy: bool = False) -> TimedeltaArr def _from_sequence_not_strict( cls, data, + *, dtype=None, copy: bool = False, freq=lib.no_default, unit=None, ) -> TimedeltaArray: + """ + A non-strict version of _from_sequence, called from TimedeltaIndex.__new__. + """ if dtype: dtype = _validate_td64_dtype(dtype) @@ -296,7 +300,6 @@ def _unbox_scalar(self, value, setitem: bool = False) -> np.timedelta64: return np.timedelta64(value.value, "ns") else: return value._as_unit(self._unit).asm8 - return np.timedelta64(value.value, "ns") def _scalar_from_string(self, value) -> Timedelta | NaTType: return Timedelta(value) diff --git a/pandas/core/base.py b/pandas/core/base.py index db81d3e396b50..2145ecd10a7b0 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -294,6 +294,7 @@ def _values(self) -> ExtensionArray | np.ndarray: # must be defined here as a property for mypy raise AbstractMethodError(self) + @final def transpose(self: _T, *args, **kwargs) -> _T: """ Return the transpose, which is by definition self. @@ -330,6 +331,7 @@ def ndim(self) -> Literal[1]: """ return 1 + @final def item(self): """ Return the first element of the underlying data as a Python scalar. 
@@ -427,6 +429,7 @@ def array(self) -> ExtensionArray: """ raise AbstractMethodError(self) + @final def to_numpy( self, dtype: npt.DTypeLike | None = None, @@ -542,6 +545,7 @@ def to_numpy( result[np.asanyarray(self.isna())] = na_value return result + @final @property def empty(self) -> bool: return not self.size @@ -902,6 +906,7 @@ def _map_values(self, mapper, na_action=None): return new_values + @final def value_counts( self, normalize: bool = False, @@ -1006,6 +1011,7 @@ def unique(self): result = unique1d(values) return result + @final def nunique(self, dropna: bool = True) -> int: """ Return number of unique elements in the object. @@ -1103,6 +1109,7 @@ def is_monotonic_decreasing(self) -> bool: return Index(self).is_monotonic_decreasing + @final def _memory_usage(self, deep: bool = False) -> int: """ Memory usage of the values. diff --git a/pandas/core/dtypes/astype.py b/pandas/core/dtypes/astype.py index 718badc2e4085..ebd45da044ad9 100644 --- a/pandas/core/dtypes/astype.py +++ b/pandas/core/dtypes/astype.py @@ -129,8 +129,7 @@ def astype_nansafe( return arr.view(dtype) elif dtype.kind == "m": - # TODO(2.0): change to use the same logic as TDA.astype, i.e. - # giving the requested dtype for supported units (s, ms, us, ns) + # give the requested dtype for supported units (s, ms, us, ns) # and doing the old convert-to-float behavior otherwise. 
if is_supported_unit(get_unit_from_dtype(arr.dtype)): from pandas.core.construction import ensure_wrapped_if_datetimelike diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index f1c7e5b5fae42..1c2632adc9e7f 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -41,6 +41,7 @@ Dtype, DtypeObj, Scalar, + npt, ) from pandas.errors import ( IntCastingNaNError, @@ -1199,7 +1200,7 @@ def convert_dtypes( def maybe_infer_to_datetimelike( - value: np.ndarray, + value: npt.NDArray[np.object_], ) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray: """ we might have a array (or single object) that is datetime like, diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py index 42630845bf6b2..a2e9c059cbcc9 100644 --- a/pandas/core/groupby/base.py +++ b/pandas/core/groupby/base.py @@ -4,10 +4,7 @@ from __future__ import annotations import dataclasses -from typing import ( - Hashable, - Literal, -) +from typing import Hashable @dataclasses.dataclass(order=True, frozen=True) @@ -61,15 +58,6 @@ class OutputKey: # produces a result that has the same shape as the group. -# TODO(2.0) Remove after pad/backfill deprecation enforced -def maybe_normalize_deprecated_kernels(kernel) -> Literal["bfill", "ffill"]: - if kernel == "backfill": - kernel = "bfill" - elif kernel == "pad": - kernel = "ffill" - return kernel - - transformation_kernels = frozenset( [ "bfill", diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index d8300bb29c274..22294cad6d0fd 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -5797,20 +5797,6 @@ def _should_fallback_to_positional(self) -> bool: """ return not self.holds_integer() - def _get_values_for_loc(self, series: Series, loc, key): - """ - Do a positional lookup on the given Series, returning either a scalar - or a Series. - - Assumes that `series.index is self` - - key is included for MultiIndex compat. 
- """ - if is_integer(loc): - return series._values[loc] - - return series.iloc[loc] - _index_shared_docs[ "get_indexer_non_unique" ] = """ @@ -6803,6 +6789,7 @@ def _cmp_method(self, other, op): return result + @final def _construct_result(self, result, name): if isinstance(result, tuple): return ( diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 3b8380a88bb8b..02edb8b971305 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2539,26 +2539,6 @@ def _should_fallback_to_positional(self) -> bool: # GH#33355 return self.levels[0]._should_fallback_to_positional - def _get_values_for_loc(self, series: Series, loc, key): - """ - Do a positional lookup on the given Series, returning either a scalar - or a Series. - - Assumes that `series.index is self` - """ - new_values = series._values[loc] - if is_scalar(loc): - return new_values - - if len(new_values) == 1 and not self.nlevels > 1: - # If more than one level left, we can not return a scalar - return new_values[0] - - new_index = self[loc] - new_index = maybe_droplevels(new_index, key) - new_ser = series._constructor(new_values, index=new_index, name=series.name) - return new_ser.__finalize__(series) - def _get_indexer_strict( self, key, axis_name: str ) -> tuple[Index, npt.NDArray[np.intp]]: diff --git a/pandas/core/series.py b/pandas/core/series.py index 9607d57766b11..1c328cebdd50a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -705,13 +705,13 @@ def array(self) -> ExtensionArray: return self._mgr.array_values() # ops - def ravel(self, order: str = "C") -> np.ndarray: + def ravel(self, order: str = "C") -> ArrayLike: """ - Return the flattened underlying data as an ndarray. + Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- - numpy.ndarray or ndarray-like + numpy.ndarray or ExtensionArray Flattened data of the Series. 
See Also diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx index 9406900b69998..b9897434666ef 100644 --- a/pandas/io/sas/sas.pyx +++ b/pandas/io/sas/sas.pyx @@ -69,6 +69,7 @@ cdef int rle_decompress(Buffer inbuff, Buffer outbuff) except? 0: int rpos = 0 int i, nbytes, end_of_first_byte size_t ipos = 0 + Py_ssize_t _ while ipos < inbuff.length: control_byte = buf_get(inbuff, ipos) & 0xF0 @@ -366,9 +367,9 @@ cdef class Parser: def read(self, int nrows): cdef: bint done - int i + Py_ssize_t i - for _ in range(nrows): + for i in range(nrows): done = self.readline() if done: break diff --git a/pandas/tests/apply/test_str.py b/pandas/tests/apply/test_str.py index 36182c46bfd67..61c879fb2b20f 100644 --- a/pandas/tests/apply/test_str.py +++ b/pandas/tests/apply/test_str.py @@ -12,7 +12,6 @@ Series, ) import pandas._testing as tm -from pandas.core.groupby.base import maybe_normalize_deprecated_kernels from pandas.tests.apply.common import ( frame_transform_kernels, series_transform_kernels, @@ -251,8 +250,6 @@ def test_transform_groupby_kernel_series(request, string_series, op): request.node.add_marker( pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame") ) - # TODO(2.0) Remove after pad/backfill deprecation enforced - op = maybe_normalize_deprecated_kernels(op) args = [0.0] if op == "fillna" else [] ones = np.ones(string_series.shape[0]) expected = string_series.groupby(ones).transform(op, *args) @@ -262,8 +259,6 @@ def test_transform_groupby_kernel_series(request, string_series, op): @pytest.mark.parametrize("op", frame_transform_kernels) def test_transform_groupby_kernel_frame(request, axis, float_frame, op): - # TODO(2.0) Remove after pad/backfill deprecation enforced - op = maybe_normalize_deprecated_kernels(op) if op == "ngroup": request.node.add_marker( diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index 75dff66a91365..f5d50465fee10 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ 
b/pandas/tests/arrays/test_timedeltas.py @@ -173,8 +173,7 @@ def test_div_td_array(self, tda): tm.assert_numpy_array_equal(result, expected) def test_add_timedeltaarraylike(self, tda): - # TODO(2.0): just do `tda_nano = tda.astype("m8[ns]")` - tda_nano = TimedeltaArray(tda._ndarray.astype("m8[ns]")) + tda_nano = tda.astype("m8[ns]") expected = tda_nano * 2 res = tda_nano + tda diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 8979c145a223c..f68e38be44811 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -46,6 +46,10 @@ from pandas.core.arrays.arrow.dtype import ArrowDtype # isort:skip +pytestmark = pytest.mark.filterwarnings( + "ignore:.* may decrease performance. Upgrade to pyarrow >=7 to possibly" +) + @pytest.fixture(params=tm.ALL_PYARROW_DTYPES, ids=str) def dtype(request): diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index c051119f0fec4..f82d3c6c06fca 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -206,13 +206,7 @@ def test_reindex(self, data, na_value): class TestIndex(base.BaseIndexTests): - - # TODO(2.0): should pass once SparseArray is stored directly in Index. 
- @pytest.mark.xfail(reason="Index cannot yet store sparse dtype") - def test_index_from_listlike_with_dtype(self, data): - msg = "passing a SparseArray to pd.Index" - with tm.assert_produces_warning(FutureWarning, match=msg): - super().test_index_from_listlike_with_dtype(data) + pass class TestMissing(BaseSparseTests, base.BaseMissingTests): diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 26f269d3d4384..daf7759b78b81 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -28,7 +28,6 @@ import pandas._testing as tm from pandas.core.arrays import BooleanArray import pandas.core.common as com -from pandas.core.groupby.base import maybe_normalize_deprecated_kernels from pandas.tests.groupby import get_groupby_method_args @@ -1873,6 +1872,7 @@ def test_pivot_table_values_key_error(): @pytest.mark.parametrize( "op", ["idxmax", "idxmin", "min", "max", "sum", "prod", "skew"] ) +@pytest.mark.filterwarnings("ignore:The default value of numeric_only:FutureWarning") @pytest.mark.filterwarnings("ignore:Dropping invalid columns:FutureWarning") @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def test_empty_groupby(columns, keys, values, method, op, request, using_array_manager): @@ -2316,8 +2316,6 @@ def test_group_on_empty_multiindex(transformation_func, request): def test_dup_labels_output_shape(groupby_func, idx): if groupby_func in {"size", "ngroup", "cumcount"}: pytest.skip(f"Not applicable for {groupby_func}") - # TODO(2.0) Remove after pad/backfill deprecation enforced - groupby_func = maybe_normalize_deprecated_kernels(groupby_func) df = DataFrame([[1, 1]], columns=idx) grp_by = df.groupby([0]) diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py index b8aa2a1c9656d..eeedb3d6bb1d0 100644 --- a/pandas/tests/groupby/test_groupby_subclass.py +++ b/pandas/tests/groupby/test_groupby_subclass.py @@ -9,7 +9,6 @@ 
Series, ) import pandas._testing as tm -from pandas.core.groupby.base import maybe_normalize_deprecated_kernels from pandas.tests.groupby import get_groupby_method_args @@ -25,8 +24,6 @@ def test_groupby_preserves_subclass(obj, groupby_func): if isinstance(obj, Series) and groupby_func in {"corrwith"}: pytest.skip(f"Not applicable for Series and {groupby_func}") - # TODO(2.0) Remove after pad/backfill deprecation enforced - groupby_func = maybe_normalize_deprecated_kernels(groupby_func) grouped = obj.groupby(np.arange(0, 10)) diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index 2b4eba539ec82..119b9929eea22 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -20,7 +20,6 @@ date_range, ) import pandas._testing as tm -from pandas.core.groupby.base import maybe_normalize_deprecated_kernels from pandas.core.groupby.generic import DataFrameGroupBy from pandas.tests.groupby import get_groupby_method_args @@ -166,9 +165,6 @@ def test_transform_broadcast(tsframe, ts): def test_transform_axis_1(request, transformation_func): # GH 36308 - # TODO(2.0) Remove after pad/backfill deprecation enforced - transformation_func = maybe_normalize_deprecated_kernels(transformation_func) - if transformation_func == "ngroup": msg = "ngroup fails with axis=1: #45986" request.node.add_marker(pytest.mark.xfail(reason=msg)) @@ -373,8 +369,6 @@ def test_transform_transformation_func(request, transformation_func): }, index=date_range("2020-01-01", "2020-01-07"), ) - # TODO(2.0) Remove after pad/backfill deprecation enforced - transformation_func = maybe_normalize_deprecated_kernels(transformation_func) if transformation_func == "cumcount": test_op = lambda x: x.transform("cumcount") mock_op = lambda x: Series(range(len(x)), x.index) diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 8ad15ac05e26a..876799c49e138 100644 
--- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -577,7 +577,7 @@ def test_use_nullabla_dtypes_and_dtype(self, read_ext): @td.skip_if_no("pyarrow") @pytest.mark.parametrize("storage", ["pyarrow", "python"]) - def test_use_nullabla_dtypes_string(self, read_ext, storage): + def test_use_nullable_dtypes_string(self, read_ext, storage): # GH#36712 if read_ext in (".xlsb", ".xls"): pytest.skip(f"No engine for filetype: '{read_ext}'") diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py index e2c7f77aae815..f5b3b608bd59e 100644 --- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py +++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py @@ -437,7 +437,7 @@ def test_use_nullabla_dtypes_and_dtype(all_parsers): @td.skip_if_no("pyarrow") @pytest.mark.parametrize("storage", ["pyarrow", "python"]) -def test_use_nullabla_dtypes_string(all_parsers, storage): +def test_use_nullable_dtypes_string(all_parsers, storage): # GH#36712 import pyarrow as pa diff --git a/pandas/tests/io/pytables/test_subclass.py b/pandas/tests/io/pytables/test_subclass.py index 823d2875c5417..27843415f367e 100644 --- a/pandas/tests/io/pytables/test_subclass.py +++ b/pandas/tests/io/pytables/test_subclass.py @@ -13,6 +13,9 @@ ) pytest.importorskip("tables") +pytestmark = pytest.mark.filterwarnings( + "ignore:`np.object` is a deprecated alias:DeprecationWarning" +) class TestHDFStoreSubclass: diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 780e3003d50d7..dd50d97c47f37 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -734,8 +734,6 @@ def test_other_datetime_unit(self, unit): if unit in ["D", "h", "m"]: # not supported so we cast to the nearest supported unit, seconds - # TODO(2.0): cast to nearest (second) instead of ns - # coerces to datetime64[ns], thus should not be affected exp_dtype = 
"datetime64[s]" else: exp_dtype = dtype diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index 7e7f6dc86b8f9..13619c2c0c828 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -44,7 +44,7 @@ def test_apply_ticks(): - result = offsets.Hour(3)._apply(offsets.Hour(4)) + result = offsets.Hour(3) + offsets.Hour(4) exp = offsets.Hour(7) assert result == exp @@ -74,7 +74,6 @@ def test_tick_add_sub(cls, n, m): expected = cls(n + m) assert left + right == expected - assert left._apply(right) == expected expected = cls(n - m) assert left - right == expected
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49426
2022-10-31T19:59:15Z
2022-11-02T16:36:50Z
2022-11-02T16:36:50Z
2022-11-02T18:56:22Z
enable pylint unused-wildcard-import
diff --git a/pyproject.toml b/pyproject.toml index ddecebaec7c72..d04ccaa7b9dad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -182,7 +182,6 @@ disable = [ "unused-argument", "unused-import", "unused-variable", - "unused-wildcard-import", "using-constant-test", "useless-else-on-loop", "useless-parent-delegation",
Issue #48855. This PR enables pylint type "W" warning: `unused-wildcard-import`. - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/49425
2022-10-31T19:58:34Z
2022-11-01T12:10:17Z
2022-11-01T12:10:17Z
2022-11-01T12:10:17Z
TST: Remove unnecessary pytest fixture scopes
diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py index e847f31cd3f9c..b734344d25174 100644 --- a/pandas/tests/arithmetic/conftest.py +++ b/pandas/tests/arithmetic/conftest.py @@ -12,9 +12,7 @@ from pandas.core.computation import expressions as expr -@pytest.fixture( - autouse=True, scope="module", params=[0, 1000000], ids=["numexpr", "python"] -) +@pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) def switch_numexpr_min_elements(request): _MIN_ELEMENTS = expr._MIN_ELEMENTS expr._MIN_ELEMENTS = request.param diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 9fc3161a7b004..822e761f63ae8 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -31,9 +31,7 @@ ) -@pytest.fixture( - autouse=True, scope="module", params=[0, 1000000], ids=["numexpr", "python"] -) +@pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) def switch_numexpr_min_elements(request): _MIN_ELEMENTS = expr._MIN_ELEMENTS expr._MIN_ELEMENTS = request.param diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 4da57fc177712..e81837898c927 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -1112,7 +1112,7 @@ def test_invalid_type_for_operator_raises(self, parser, engine, op): class TestDataFrameQueryBacktickQuoting: - @pytest.fixture(scope="class") + @pytest.fixture def df(self): """ Yields a dataframe with strings that may or may not need escaping diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 37c13c37d070b..5d077f014dc89 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -22,7 +22,7 @@ import pandas.core.common as com -@pytest.fixture(scope="class", params=[None, "foo"]) +@pytest.fixture(params=[None, "foo"]) 
def name(request): return request.param diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py index 2f28c33a3bbc6..18b5af00c8d5d 100644 --- a/pandas/tests/indexes/interval/test_interval_range.py +++ b/pandas/tests/indexes/interval/test_interval_range.py @@ -20,7 +20,7 @@ from pandas.tseries.offsets import Day -@pytest.fixture(scope="class", params=[None, "foo"]) +@pytest.fixture(params=[None, "foo"]) def name(request): return request.param diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py index 3b9de8d9e45d9..45b25f2533afd 100644 --- a/pandas/tests/indexes/interval/test_interval_tree.py +++ b/pandas/tests/indexes/interval/test_interval_tree.py @@ -18,7 +18,7 @@ def skipif_32bit(param): return pytest.param(param, marks=marks) -@pytest.fixture(scope="class", params=["int64", "float64", "uint64"]) +@pytest.fixture(params=["int64", "float64", "uint64"]) def dtype(request): return request.param diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 0ea1203359153..dcfec8848849f 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -101,7 +101,6 @@ def test_same_ordering(datapath): pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]), pytest.param("lxml", marks=td.skip_if_no("lxml")), ], - scope="class", ) class TestReadHtml: @pytest.fixture @@ -112,7 +111,7 @@ def spam_data(self, datapath): def banklist_data(self, datapath): return datapath("io", "data", "html", "banklist.html") - @pytest.fixture(autouse=True, scope="function") + @pytest.fixture(autouse=True) def set_defaults(self, flavor): self.read_html = partial(read_html, flavor=flavor) yield diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index c8044a44b48ee..37711054f2285 100644 --- a/pandas/tests/series/test_arithmetic.py +++ 
b/pandas/tests/series/test_arithmetic.py @@ -30,9 +30,7 @@ from pandas.core.computation import expressions as expr -@pytest.fixture( - autouse=True, scope="module", params=[0, 1000000], ids=["numexpr", "python"] -) +@pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) def switch_numexpr_min_elements(request): _MIN_ELEMENTS = expr._MIN_ELEMENTS expr._MIN_ELEMENTS = request.param
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). I've noticed teardown of fixtures with a greater scope than `function` can take more time e.g. in `pandas/tests/arithmetic/test_numeric.py` Before: ``` ==================================================================================== slowest 30 durations ==================================================================================== 0.77s call pandas/tests/arithmetic/test_numeric.py::TestNumericComparisons::test_numeric_cmp_string_numexpr_path[numexpr-array] 0.76s call pandas/tests/arithmetic/test_numeric.py::TestNumericComparisons::test_numeric_cmp_string_numexpr_path[python-array] 0.34s call pandas/tests/arithmetic/test_timedelta64.py::TestTimedelta64ArithmeticUnsorted::test_tda_add_dt64_object_array[python-array-pytz.FixedOffset(-300)] 0.26s call pandas/tests/arithmetic/test_datetime64.py::TestDatetime64Arithmetic::test_dt64arr_sub_timedeltalike_scalar[python-pytz.FixedOffset(-300)-Timedelta-DataFrame] 0.19s call pandas/tests/arithmetic/test_datetime64.py::TestDatetime64Arithmetic::test_dt64arr_add_sub_parr[numexpr-Series-DataFrame-None-D] 0.18s teardown pandas/tests/arithmetic/test_datetime64.py::test_dt64arr_addsub_object_dtype_2d[python] 0.14s call pandas/tests/arithmetic/test_datetime64.py::TestDatetime64SeriesComparison::test_dt64arr_timestamp_equality[numexpr-DataFrame] 0.05s call pandas/tests/arithmetic/test_datetime64.py::TestDatetime64DateOffsetArithmetic::test_dt64arr_add_sub_relativedelta_offsets[numexpr-DataFrame] 0.05s call pandas/tests/arithmetic/test_datetime64.py::TestDatetime64DateOffsetArithmetic::test_dt64arr_add_sub_relativedelta_offsets[python-DataFrame] 0.04s teardown 
pandas/tests/arithmetic/test_timedelta64.py::test_add_timestamp_to_timedelta[python] 0.04s teardown pandas/tests/arithmetic/test_numeric.py::test_empty_str_comparison[python-5-5] ... ================================================================= 29826 passed, 72 xfailed, 12 warnings in 147.93s (0:02:27) ================================================================= ``` After ``` ==================================================================================== slowest 30 durations ==================================================================================== 0.75s call pandas/tests/arithmetic/test_numeric.py::TestNumericComparisons::test_numeric_cmp_string_numexpr_path[python-array] 0.75s call pandas/tests/arithmetic/test_numeric.py::TestNumericComparisons::test_numeric_cmp_string_numexpr_path[numexpr-array] 0.28s call pandas/tests/arithmetic/test_timedelta64.py::TestTimedeltaArraylikeAddSubOps::test_td64arr_add_sub_datetimelike_scalar[numexpr-DataFrame-pytz.FixedOffset(300)-Timestamp] 0.19s call pandas/tests/arithmetic/test_datetime64.py::TestDatetime64Arithmetic::test_dt64arr_addsub_intlike[numexpr-array-zoneinfo.ZoneInfo(key='UTC')-uint8-Q] 0.15s call pandas/tests/arithmetic/test_datetime64.py::TestDatetime64ArrayLikeComparisons::test_dt64arr_cmp_scalar_invalid[python-datetime.timezone.utc-Index-foo] 0.12s call pandas/tests/arithmetic/test_datetime64.py::TestDatetime64SeriesComparison::test_dt64arr_timestamp_equality[numexpr-DataFrame] 0.05s call pandas/tests/arithmetic/test_datetime64.py::TestDatetime64DateOffsetArithmetic::test_dt64arr_add_sub_relativedelta_offsets[numexpr-DataFrame] 0.05s call pandas/tests/arithmetic/test_datetime64.py::TestDatetime64DateOffsetArithmetic::test_dt64arr_add_sub_relativedelta_offsets[python-DataFrame] 0.03s call pandas/tests/arithmetic/test_timedelta64.py::TestTimedeltaArraylikeAddSubOps::test_timedelta64_ops_nat[numexpr] 0.03s call 
pandas/tests/arithmetic/test_timedelta64.py::TestTimedeltaArraylikeAddSubOps::test_timedelta64_ops_nat[python] 0.03s call pandas/tests/arithmetic/test_datetime64.py::TestDatetime64ArrayLikeComparisons::test_dt64arr_cmp_arraylike_inva ... ================================================================= 29826 passed, 72 xfailed, 12 warnings in 141.15s (0:02:21) ================================================================= ```
https://api.github.com/repos/pandas-dev/pandas/pulls/49424
2022-10-31T19:33:43Z
2022-11-01T14:48:07Z
2022-11-01T14:48:07Z
2022-11-01T16:47:12Z
CLN: test_nanops.py
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 0e64181bd46a7..ae8791a774ed5 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -29,6 +29,175 @@ def skipna(request): return request.param +@pytest.fixture +def disable_bottleneck(monkeypatch): + with monkeypatch.context() as m: + m.setattr(nanops, "_USE_BOTTLENECK", False) + yield + + +@pytest.fixture +def arr_shape(): + return 11, 7 + + +@pytest.fixture +def arr_float(arr_shape): + np.random.seed(11235) + return np.random.randn(*arr_shape) + + +@pytest.fixture +def arr_complex(arr_float): + return arr_float + arr_float * 1j + + +@pytest.fixture +def arr_int(arr_shape): + np.random.seed(11235) + return np.random.randint(-10, 10, arr_shape) + + +@pytest.fixture +def arr_bool(arr_shape): + np.random.seed(11235) + return np.random.randint(0, 2, arr_shape) == 0 + + +@pytest.fixture +def arr_str(arr_float): + return np.abs(arr_float).astype("S") + + +@pytest.fixture +def arr_utf(arr_float): + return np.abs(arr_float).astype("U") + + +@pytest.fixture +def arr_date(arr_shape): + np.random.seed(11235) + return np.random.randint(0, 20000, arr_shape).astype("M8[ns]") + + +@pytest.fixture +def arr_tdelta(arr_shape): + np.random.seed(11235) + return np.random.randint(0, 20000, arr_shape).astype("m8[ns]") + + +@pytest.fixture +def arr_nan(arr_shape): + return np.tile(np.nan, arr_shape) + + +@pytest.fixture +def arr_float_nan(arr_float, arr_nan): + return np.vstack([arr_float, arr_nan]) + + +@pytest.fixture +def arr_nan_float1(arr_nan, arr_float): + return np.vstack([arr_nan, arr_float]) + + +@pytest.fixture +def arr_nan_nan(arr_nan): + return np.vstack([arr_nan, arr_nan]) + + +@pytest.fixture +def arr_inf(arr_float): + return arr_float * np.inf + + +@pytest.fixture +def arr_float_inf(arr_float, arr_inf): + return np.vstack([arr_float, arr_inf]) + + +@pytest.fixture +def arr_nan_inf(arr_nan, arr_inf): + return np.vstack([arr_nan, arr_inf]) + + +@pytest.fixture +def 
arr_float_nan_inf(arr_float, arr_nan, arr_inf): + return np.vstack([arr_float, arr_nan, arr_inf]) + + +@pytest.fixture +def arr_nan_nan_inf(arr_nan, arr_inf): + return np.vstack([arr_nan, arr_nan, arr_inf]) + + +@pytest.fixture +def arr_obj( + arr_float, arr_int, arr_bool, arr_complex, arr_str, arr_utf, arr_date, arr_tdelta +): + return np.vstack( + [ + arr_float.astype("O"), + arr_int.astype("O"), + arr_bool.astype("O"), + arr_complex.astype("O"), + arr_str.astype("O"), + arr_utf.astype("O"), + arr_date.astype("O"), + arr_tdelta.astype("O"), + ] + ) + + +@pytest.fixture +def arr_nan_nanj(arr_nan): + with np.errstate(invalid="ignore"): + return arr_nan + arr_nan * 1j + + +@pytest.fixture +def arr_complex_nan(arr_complex, arr_nan_nanj): + with np.errstate(invalid="ignore"): + return np.vstack([arr_complex, arr_nan_nanj]) + + +@pytest.fixture +def arr_nan_infj(arr_inf): + with np.errstate(invalid="ignore"): + return arr_inf * 1j + + +@pytest.fixture +def arr_complex_nan_infj(arr_complex, arr_nan_infj): + with np.errstate(invalid="ignore"): + return np.vstack([arr_complex, arr_nan_infj]) + + +@pytest.fixture +def arr_float_1d(arr_float): + return arr_float[:, 0] + + +@pytest.fixture +def arr_nan_1d(arr_nan): + return arr_nan[:, 0] + + +@pytest.fixture +def arr_float_nan_1d(arr_float_nan): + return arr_float_nan[:, 0] + + +@pytest.fixture +def arr_float1_nan_1d(arr_float1_nan): + return arr_float1_nan[:, 0] + + +@pytest.fixture +def arr_nan_float1_1d(arr_nan_float1): + return arr_nan_float1[:, 0] + + class TestnanopsDataFrame: def setup_method(self): np.random.seed(11235) @@ -299,45 +468,6 @@ def test_nanmean(self, skipna): nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False ) - @pytest.mark.parametrize("val", [2**55, -(2**55), 20150515061816532]) - def test_nanmean_overflow(self, val): - # GH 10155 - # In the previous implementation mean can overflow for int dtypes, it - # is now consistent with numpy - - ser = Series(val, index=range(500), 
dtype=np.int64) - result = ser.mean() - np_result = ser.values.mean() - assert result == val - assert result == np_result - assert result.dtype == np.float64 - - @pytest.mark.parametrize( - "dtype", - [ - np.int16, - np.int32, - np.int64, - np.float32, - np.float64, - getattr(np, "float128", None), - ], - ) - def test_returned_dtype(self, dtype): - if dtype is None: - # no float128 available - return - - ser = Series(range(10), dtype=dtype) - group_a = ["mean", "std", "var", "skew", "kurt"] - group_b = ["min", "max"] - for method in group_a + group_b: - result = getattr(ser, method)() - if is_integer_dtype(dtype) and method in group_a: - assert result.dtype == np.float64 - else: - assert result.dtype == dtype - def test_nanmedian(self, skipna): with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", RuntimeWarning) @@ -623,124 +753,137 @@ def test_nancov(self): targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1) - @pytest.mark.parametrize( - "op,nanop", - [ - (operator.eq, nanops.naneq), - (operator.ne, nanops.nanne), - (operator.gt, nanops.nangt), - (operator.ge, nanops.nange), - (operator.lt, nanops.nanlt), - (operator.le, nanops.nanle), - ], - ) - def test_nan_comparison(self, op, nanop): - targ0 = op(self.arr_float, self.arr_float1) - arr_float = self.arr_float - arr_float1 = self.arr_float1 - arr_nan = self.arr_nan - arr_nan_nan = self.arr_nan_nan - arr_float_nan = self.arr_float_nan - arr_float1_nan = self.arr_float1_nan - arr_nan_float1 = self.arr_nan_float1 - - while targ0.ndim: - res0 = nanop(arr_float, arr_float1) - tm.assert_almost_equal(targ0, res0) - - if targ0.ndim > 1: - targ1 = np.vstack([targ0, arr_nan]) - else: - targ1 = np.hstack([targ0, arr_nan]) - res1 = nanop(arr_float_nan, arr_float1_nan) - tm.assert_numpy_array_equal(targ1, res1, check_dtype=False) - - targ2 = arr_nan_nan - res2 = nanop(arr_float_nan, arr_nan_float1) - 
tm.assert_numpy_array_equal(targ2, res2, check_dtype=False) - - # Lower dimension for next step in the loop - arr_float = np.take(arr_float, 0, axis=-1) - arr_float1 = np.take(arr_float1, 0, axis=-1) - arr_nan = np.take(arr_nan, 0, axis=-1) - arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1) - arr_float_nan = np.take(arr_float_nan, 0, axis=-1) - arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1) - arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1) - targ0 = np.take(targ0, 0, axis=-1) - @pytest.mark.parametrize( - "arr, correct", - [ - ("arr_complex", False), - ("arr_int", False), - ("arr_bool", False), - ("arr_str", False), - ("arr_utf", False), - ("arr_complex", False), - ("arr_complex_nan", False), - ("arr_nan_nanj", False), - ("arr_nan_infj", True), - ("arr_complex_nan_infj", True), - ], - ) - def test__has_infs_non_float(self, arr, correct): - val = getattr(self, arr) - while getattr(val, "ndim", True): - res0 = nanops._has_infs(val) - if correct: - assert res0 - else: - assert not res0 +@pytest.mark.parametrize( + "op,nanop", + [ + (operator.eq, nanops.naneq), + (operator.ne, nanops.nanne), + (operator.gt, nanops.nangt), + (operator.ge, nanops.nange), + (operator.lt, nanops.nanlt), + (operator.le, nanops.nanle), + ], +) +def test_nan_comparison(request, op, nanop, disable_bottleneck): + arr_float = request.getfixturevalue("arr_float") + arr_float1 = request.getfixturevalue("arr_float") + targ0 = op(arr_float, arr_float1) + arr_nan = request.getfixturevalue("arr_nan") + arr_nan_nan = request.getfixturevalue("arr_nan_nan") + arr_float_nan = request.getfixturevalue("arr_float_nan") + arr_float1_nan = request.getfixturevalue("arr_float_nan") + arr_nan_float1 = request.getfixturevalue("arr_nan_float1") + + while targ0.ndim: + res0 = nanop(arr_float, arr_float1) + tm.assert_almost_equal(targ0, res0) + + if targ0.ndim > 1: + targ1 = np.vstack([targ0, arr_nan]) + else: + targ1 = np.hstack([targ0, arr_nan]) + res1 = nanop(arr_float_nan, arr_float1_nan) + 
tm.assert_numpy_array_equal(targ1, res1, check_dtype=False) + + targ2 = arr_nan_nan + res2 = nanop(arr_float_nan, arr_nan_float1) + tm.assert_numpy_array_equal(targ2, res2, check_dtype=False) + + # Lower dimension for next step in the loop + arr_float = np.take(arr_float, 0, axis=-1) + arr_float1 = np.take(arr_float1, 0, axis=-1) + arr_nan = np.take(arr_nan, 0, axis=-1) + arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1) + arr_float_nan = np.take(arr_float_nan, 0, axis=-1) + arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1) + arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1) + targ0 = np.take(targ0, 0, axis=-1) - if not hasattr(val, "ndim"): - break - # Reduce dimension for next step in the loop - val = np.take(val, 0, axis=-1) +@pytest.mark.parametrize( + "arr, correct", + [ + ("arr_complex", False), + ("arr_int", False), + ("arr_bool", False), + ("arr_str", False), + ("arr_utf", False), + ("arr_complex", False), + ("arr_complex_nan", False), + ("arr_nan_nanj", False), + ("arr_nan_infj", True), + ("arr_complex_nan_infj", True), + ], +) +def test_has_infs_non_float(request, arr, correct, disable_bottleneck): + val = request.getfixturevalue(arr) + while getattr(val, "ndim", True): + res0 = nanops._has_infs(val) + if correct: + assert res0 + else: + assert not res0 + + if not hasattr(val, "ndim"): + break + + # Reduce dimension for next step in the loop + val = np.take(val, 0, axis=-1) - @pytest.mark.parametrize( - "arr, correct", - [ - ("arr_float", False), - ("arr_nan", False), - ("arr_float_nan", False), - ("arr_nan_nan", False), - ("arr_float_inf", True), - ("arr_inf", True), - ("arr_nan_inf", True), - ("arr_float_nan_inf", True), - ("arr_nan_nan_inf", True), - ], - ) - @pytest.mark.parametrize("astype", [None, "f4", "f2"]) - def test__has_infs_floats(self, arr, correct, astype): - val = getattr(self, arr) - if astype is not None: - val = val.astype(astype) - while getattr(val, "ndim", True): - res0 = nanops._has_infs(val) - if correct: - assert res0 - else: - 
assert not res0 - if not hasattr(val, "ndim"): - break +@pytest.mark.parametrize( + "arr, correct", + [ + ("arr_float", False), + ("arr_nan", False), + ("arr_float_nan", False), + ("arr_nan_nan", False), + ("arr_float_inf", True), + ("arr_inf", True), + ("arr_nan_inf", True), + ("arr_float_nan_inf", True), + ("arr_nan_nan_inf", True), + ], +) +@pytest.mark.parametrize("astype", [None, "f4", "f2"]) +def test_has_infs_floats(request, arr, correct, astype, disable_bottleneck): + val = request.getfixturevalue(arr) + if astype is not None: + val = val.astype(astype) + while getattr(val, "ndim", True): + res0 = nanops._has_infs(val) + if correct: + assert res0 + else: + assert not res0 - # Reduce dimension for next step in the loop - val = np.take(val, 0, axis=-1) + if not hasattr(val, "ndim"): + break - def test__bn_ok_dtype(self): - assert nanops._bn_ok_dtype(self.arr_float.dtype, "test") - assert nanops._bn_ok_dtype(self.arr_complex.dtype, "test") - assert nanops._bn_ok_dtype(self.arr_int.dtype, "test") - assert nanops._bn_ok_dtype(self.arr_bool.dtype, "test") - assert nanops._bn_ok_dtype(self.arr_str.dtype, "test") - assert nanops._bn_ok_dtype(self.arr_utf.dtype, "test") - assert not nanops._bn_ok_dtype(self.arr_date.dtype, "test") - assert not nanops._bn_ok_dtype(self.arr_tdelta.dtype, "test") - assert not nanops._bn_ok_dtype(self.arr_obj.dtype, "test") + # Reduce dimension for next step in the loop + val = np.take(val, 0, axis=-1) + + +@pytest.mark.parametrize( + "fixture", ["arr_float", "arr_complex", "arr_int", "arr_bool", "arr_str", "arr_utf"] +) +def test_bn_ok_dtype(fixture, request, disable_bottleneck): + obj = request.getfixturevalue(fixture) + assert nanops._bn_ok_dtype(obj.dtype, "test") + + +@pytest.mark.parametrize( + "fixture", + [ + "arr_date", + "arr_tdelta", + "arr_obj", + ], +) +def test_bn_not_ok_dtype(fixture, request, disable_bottleneck): + obj = request.getfixturevalue(fixture) + assert not nanops._bn_ok_dtype(obj.dtype, "test") class 
TestEnsureNumeric: @@ -1111,7 +1254,7 @@ def test_nanops_independent_of_mask_param(operation): @pytest.mark.parametrize("min_count", [-1, 0]) -def test_check_below_min_count__negative_or_zero_min_count(min_count): +def test_check_below_min_count_negative_or_zero_min_count(min_count): # GH35227 result = nanops.check_below_min_count((21, 37), None, min_count) expected_result = False @@ -1122,7 +1265,7 @@ def test_check_below_min_count__negative_or_zero_min_count(min_count): "mask", [None, np.array([False, False, True]), np.array([True] + 9 * [False])] ) @pytest.mark.parametrize("min_count, expected_result", [(1, False), (101, True)]) -def test_check_below_min_count__positive_min_count(mask, min_count, expected_result): +def test_check_below_min_count_positive_min_count(mask, min_count, expected_result): # GH35227 shape = (10, 10) result = nanops.check_below_min_count(shape, mask, min_count) @@ -1132,7 +1275,7 @@ def test_check_below_min_count__positive_min_count(mask, min_count, expected_res @td.skip_if_windows @td.skip_if_32bit @pytest.mark.parametrize("min_count, expected_result", [(1, False), (2812191852, True)]) -def test_check_below_min_count__large_shape(min_count, expected_result): +def test_check_below_min_count_large_shape(min_count, expected_result): # GH35227 large shape used to show that the issue is fixed shape = (2244367, 1253) result = nanops.check_below_min_count(shape, mask=None, min_count=min_count) @@ -1143,3 +1286,41 @@ def test_check_below_min_count__large_shape(min_count, expected_result): def test_check_bottleneck_disallow(any_real_numpy_dtype, func): # GH 42878 bottleneck sometimes produces unreliable results for mean and sum assert not nanops._bn_ok_dtype(np.dtype(any_real_numpy_dtype).type, func) + + +@pytest.mark.parametrize("val", [2**55, -(2**55), 20150515061816532]) +def test_nanmean_overflow(disable_bottleneck, val): + # GH 10155 + # In the previous implementation mean can overflow for int dtypes, it + # is now consistent with numpy + + 
ser = Series(val, index=range(500), dtype=np.int64) + result = ser.mean() + np_result = ser.values.mean() + assert result == val + assert result == np_result + assert result.dtype == np.float64 + + +@pytest.mark.parametrize( + "dtype", + [ + np.int16, + np.int32, + np.int64, + np.float32, + np.float64, + getattr(np, "float128", None), + ], +) +@pytest.mark.parametrize("method", ["mean", "std", "var", "skew", "kurt", "min", "max"]) +def test_returned_dtype(disable_bottleneck, dtype, method): + if dtype is None: + pytest.skip("np.float128 not available") + + ser = Series(range(10), dtype=dtype) + result = getattr(ser, method)() + if is_integer_dtype(dtype) and method not in ["min", "max"]: + assert result.dtype == np.float64 + else: + assert result.dtype == dtype
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). Avoid `setup_method` being unnecessarily called on tests that don't require all the DataFrame/array objects that are being created
https://api.github.com/repos/pandas-dev/pandas/pulls/49423
2022-10-31T19:30:32Z
2022-11-10T19:39:19Z
2022-11-10T19:39:19Z
2022-11-10T20:30:54Z