title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
COMPAT: SettingWithCopy will now warn when slices which can generate views are then set
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 39ff807d6b1e4..51ddbdd4dbee6 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -91,6 +91,22 @@ API changes df df.dtypes +- ``SettingWithCopy`` raise/warnings (according to the option ``mode.chained_assignment``) will now be issued when setting a value on a sliced mixed-dtype DataFrame using chained-assignment. (:issue:`7845`) + + .. code-block:: python + + In [1]: df = DataFrame(np.arange(0,9), columns=['count']) + + In [2]: df['group'] = 'b' + + In [3]: df.iloc[0:5]['group'] = 'a' + /usr/local/bin/ipython:1: SettingWithCopyWarning: + A value is trying to be set on a copy of a slice from a DataFrame. + Try using .loc[row_indexer,col_indexer] = value instead + + See the the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy + + .. _whatsnew_0150.cat: Categoricals in Series/DataFrame diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 636dedfbeb7b7..4b8d13ce30355 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2075,13 +2075,12 @@ def _set_item(self, key, value): Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the - same length as the DataFrame's index or an error will be thrown. + same length as the DataFrames index or an error will be thrown. - Series/TimeSeries will be conformed to the DataFrame's index to + Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
""" - is_existing = key in self.columns self._ensure_valid_index(value) value = self._sanitize_column(key, value) NDFrame._set_item(self, key, value) @@ -2089,7 +2088,7 @@ def _set_item(self, key, value): # check if we are modifying a copy # try to set first as we want an invalid # value exeption to occur first - if is_existing: + if len(self): self._check_setitem_copy() def insert(self, loc, column, value, allow_duplicates=False): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 71da20af2ad43..cef18c5ad3c2b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1133,7 +1133,13 @@ def _slice(self, slobj, axis=0, typ=None): """ axis = self._get_block_manager_axis(axis) - return self._constructor(self._data.get_slice(slobj, axis=axis)) + result = self._constructor(self._data.get_slice(slobj, axis=axis)) + + # this could be a view + # but only in a single-dtyped view slicable case + is_copy = axis!=0 or result._is_view + result._set_is_copy(self, copy=is_copy) + return result def _set_item(self, key, value): self._data.set(key, value) @@ -1149,10 +1155,28 @@ def _set_is_copy(self, ref=None, copy=True): self.is_copy = None def _check_setitem_copy(self, stacklevel=4, t='setting'): - """ validate if we are doing a settitem on a chained copy. + """ + + validate if we are doing a settitem on a chained copy. If you call this function, be sure to set the stacklevel such that the - user will see the error *at the level of setting*""" + user will see the error *at the level of setting* + + It is technically possible to figure out that we are setting on + a copy even WITH a multi-dtyped pandas object. In other words, some blocks + may be views while other are not. Currently _is_view will ALWAYS return False + for multi-blocks to avoid having to handle this case. 
+ + df = DataFrame(np.arange(0,9), columns=['count']) + df['group'] = 'b' + + # this technically need not raise SettingWithCopy if both are view (which is not + # generally guaranteed but is usually True + # however, this is in general not a good practice and we recommend using .loc + df.iloc[0:5]['group'] = 'a' + + """ + if self.is_copy: value = config.get_option('mode.chained_assignment') @@ -1170,14 +1194,18 @@ def _check_setitem_copy(self, stacklevel=4, t='setting'): pass if t == 'referant': - t = ("A value is trying to be set on a copy of a slice from a " + t = ("\n" + "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" - "See the the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy") + "See the the caveats in the documentation: " + "http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy") else: - t = ("A value is trying to be set on a copy of a slice from a " - "DataFrame.\nTry using .loc[row_index,col_indexer] = value " - "instead\n\n" - "See the the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy") + t = ("\n" + "A value is trying to be set on a copy of a slice from a " + "DataFrame.\n" + "Try using .loc[row_indexer,col_indexer] = value instead\n\n" + "See the the caveats in the documentation: " + "http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy") if value == 'raise': raise SettingWithCopyError(t) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 08d3fbe335f35..48c3b4ece1d95 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -25,7 +25,7 @@ notnull, _DATELIKE_DTYPES, is_numeric_dtype, is_timedelta64_dtype, is_datetime64_dtype, is_categorical_dtype) - +from pandas.core.config import option_context from pandas import _np_version_under1p7 import pandas.lib as lib from pandas.lib import Timestamp @@ -635,7 +635,9 
@@ def apply(self, func, *args, **kwargs): @wraps(func) def f(g): - return func(g, *args, **kwargs) + # ignore SettingWithCopy here in case the user mutates + with option_context('mode.chained_assignment',None): + return func(g, *args, **kwargs) return self._python_apply_general(f) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index cad7b579aa554..98e8d4f88104f 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2519,6 +2519,14 @@ def is_view(self): """ return a boolean if we are a single block and are a view """ if len(self.blocks) == 1: return self.blocks[0].values.base is not None + + # It is technically possible to figure out which blocks are views + # e.g. [ b.values.base is not None for b in self.blocks ] + # but then we have the case of possibly some blocks being a view + # and some blocks not. setting in theory is possible on the non-view + # blocks w/o causing a SettingWithCopy raise/warn. But this is a bit + # complicated + return False def get_bool_data(self, copy=False): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 0dd729d58f174..c5cacda17edba 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -418,8 +418,12 @@ def test_setitem(self): self.frame['col8'] = 'foo' assert((self.frame['col8'] == 'foo').all()) + # this is partially a view (e.g. some blocks are view) + # so raise/warn smaller = self.frame[:2] - smaller['col10'] = ['1', '2'] + def f(): + smaller['col10'] = ['1', '2'] + self.assertRaises(com.SettingWithCopyError, f) self.assertEqual(smaller['col10'].dtype, np.object_) self.assertTrue((smaller['col10'] == ['1', '2']).all()) @@ -830,8 +834,11 @@ def test_fancy_getitem_slice_mixed(self): self.assertEqual(sliced['D'].dtype, np.float64) # get view with single block + # setting it triggers setting with copy sliced = self.frame.ix[:, -3:] - sliced['C'] = 4. + def f(): + sliced['C'] = 4. 
+ self.assertRaises(com.SettingWithCopyError, f) self.assertTrue((self.frame['C'] == 4).all()) def test_fancy_setitem_int_labels(self): @@ -1618,7 +1625,10 @@ def test_irow(self): assert_frame_equal(result, expected) # verify slice is view - result[2] = 0. + # setting it makes it raise/warn + def f(): + result[2] = 0. + self.assertRaises(com.SettingWithCopyError, f) exp_col = df[2].copy() exp_col[4:8] = 0. assert_series_equal(df[2], exp_col) @@ -1645,7 +1655,10 @@ def test_icol(self): assert_frame_equal(result, expected) # verify slice is view - result[8] = 0. + # and that we are setting a copy + def f(): + result[8] = 0. + self.assertRaises(com.SettingWithCopyError, f) self.assertTrue((df[8] == 0).all()) # list of integers diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index ea4d66074e65a..5adaacbeb9d29 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -13,6 +13,7 @@ from pandas.core.groupby import (SpecificationError, DataError, _nargsort, _lexsort_indexer) from pandas.core.series import Series +from pandas.core.config import option_context from pandas.util.testing import (assert_panel_equal, assert_frame_equal, assert_series_equal, assert_almost_equal, assert_index_equal, assertRaisesRegexp) @@ -2299,9 +2300,11 @@ def f(group): self.assertEqual(result['d'].dtype, np.float64) - for key, group in grouped: - res = f(group) - assert_frame_equal(res, result.ix[key]) + # this is by definition a mutating operation! 
+ with option_context('mode.chained_assignment',None): + for key, group in grouped: + res = f(group) + assert_frame_equal(res, result.ix[key]) def test_groupby_wrong_multi_labels(self): from pandas import read_csv diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 0e962800fef08..64e9d18d0aa2f 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -3246,6 +3246,13 @@ def f(): df['column1'] = df['column1'] + 'c' str(df) + # from SO: http://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc + df = DataFrame(np.arange(0,9), columns=['count']) + df['group'] = 'b' + def f(): + df.iloc[0:5]['group'] = 'a' + self.assertRaises(com.SettingWithCopyError, f) + def test_float64index_slicing_bug(self): # GH 5557, related to slicing a float index ser = {256: 2321.0, 1: 78.0, 2: 2716.0, 3: 0.0, 4: 369.0, 5: 0.0, 6: 269.0, 7: 0.0, 8: 0.0, 9: 0.0, 10: 3536.0, 11: 0.0, 12: 24.0, 13: 0.0, 14: 931.0, 15: 0.0, 16: 101.0, 17: 78.0, 18: 9643.0, 19: 0.0, 20: 0.0, 21: 0.0, 22: 63761.0, 23: 0.0, 24: 446.0, 25: 0.0, 26: 34773.0, 27: 0.0, 28: 729.0, 29: 78.0, 30: 0.0, 31: 0.0, 32: 3374.0, 33: 0.0, 34: 1391.0, 35: 0.0, 36: 361.0, 37: 0.0, 38: 61808.0, 39: 0.0, 40: 0.0, 41: 0.0, 42: 6677.0, 43: 0.0, 44: 802.0, 45: 0.0, 46: 2691.0, 47: 0.0, 48: 3582.0, 49: 0.0, 50: 734.0, 51: 0.0, 52: 627.0, 53: 70.0, 54: 2584.0, 55: 0.0, 56: 324.0, 57: 0.0, 58: 605.0, 59: 0.0, 60: 0.0, 61: 0.0, 62: 3989.0, 63: 10.0, 64: 42.0, 65: 0.0, 66: 904.0, 67: 0.0, 68: 88.0, 69: 70.0, 70: 8172.0, 71: 0.0, 72: 0.0, 73: 0.0, 74: 64902.0, 75: 0.0, 76: 347.0, 77: 0.0, 78: 36605.0, 79: 0.0, 80: 379.0, 81: 70.0, 82: 0.0, 83: 0.0, 84: 3001.0, 85: 0.0, 86: 1630.0, 87: 7.0, 88: 364.0, 89: 0.0, 90: 67404.0, 91: 9.0, 92: 0.0, 93: 0.0, 94: 7685.0, 95: 0.0, 96: 1017.0, 97: 0.0, 98: 2831.0, 99: 0.0, 100: 2963.0, 101: 0.0, 102: 854.0, 103: 0.0, 104: 0.0, 105: 0.0, 106: 0.0, 107: 0.0, 108: 0.0, 109: 0.0, 110: 0.0, 111: 0.0, 112: 0.0, 
113: 0.0, 114: 0.0, 115: 0.0, 116: 0.0, 117: 0.0, 118: 0.0, 119: 0.0, 120: 0.0, 121: 0.0, 122: 0.0, 123: 0.0, 124: 0.0, 125: 0.0, 126: 67744.0, 127: 22.0, 128: 264.0, 129: 0.0, 260: 197.0, 268: 0.0, 265: 0.0, 269: 0.0, 261: 0.0, 266: 1198.0, 267: 0.0, 262: 2629.0, 258: 775.0, 257: 0.0, 263: 0.0, 259: 0.0, 264: 163.0, 250: 10326.0, 251: 0.0, 252: 1228.0, 253: 0.0, 254: 2769.0, 255: 0.0} diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py index 9132fea089fe7..ada13d6f4bccb 100644 --- a/pandas/tools/pivot.py +++ b/pandas/tools/pivot.py @@ -228,9 +228,14 @@ def _all_key(key): if len(rows) > 0: margin = data[rows + values].groupby(rows).agg(aggfunc) cat_axis = 1 + for key, piece in table.groupby(level=0, axis=cat_axis): all_key = _all_key(key) + + # we are going to mutate this, so need to copy! + piece = piece.copy() piece[all_key] = margin[key] + table_pieces.append(piece) margin_keys.append(all_key) else: diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py index 4601ad0784562..df2f270346e20 100644 --- a/pandas/tools/tests/test_merge.py +++ b/pandas/tools/tests/test_merge.py @@ -1320,6 +1320,7 @@ def test_append_many(self): result = chunks[0].append(chunks[1:]) tm.assert_frame_equal(result, self.frame) + chunks[-1] = chunks[-1].copy() chunks[-1]['foo'] = 'bar' result = chunks[0].append(chunks[1:]) tm.assert_frame_equal(result.ix[:, self.frame.columns], self.frame) @@ -1673,7 +1674,7 @@ def test_join_dups(self): def test_handle_empty_objects(self): df = DataFrame(np.random.randn(10, 4), columns=list('abcd')) - baz = df[:5] + baz = df[:5].copy() baz['foo'] = 'bar' empty = df[5:5]
https://api.github.com/repos/pandas-dev/pandas/pulls/7845
2014-07-26T01:13:57Z
2014-07-26T12:47:09Z
2014-07-26T12:47:09Z
2014-07-26T12:47:09Z
BUG/VIS: rot and fontsize are not applied to timeseries plots
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index a30400322716c..bf9dfa266817b 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -305,7 +305,7 @@ Bug Fixes (except for the case of two DataFrames with ``pairwise=False``, where behavior is unchanged) (:issue:`7542`) - +- Bug in ``DataFrame.plot`` and ``Series.plot`` may ignore ``rot`` and ``fontsize`` keywords (:issue:`7844`) - Bug in ``DatetimeIndex.value_counts`` doesn't preserve tz (:issue:`7735`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index f9ae058c065e3..5d9b43e48e3c1 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -240,21 +240,33 @@ def _check_ticks_props(self, axes, xlabelsize=None, xrot=None, yrot : number expected yticks rotation """ + from matplotlib.ticker import NullFormatter axes = self._flatten_visible(axes) for ax in axes: if xlabelsize or xrot: - xtick = ax.get_xticklabels()[0] - if xlabelsize is not None: - self.assertAlmostEqual(xtick.get_fontsize(), xlabelsize) - if xrot is not None: - self.assertAlmostEqual(xtick.get_rotation(), xrot) + if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter): + # If minor ticks has NullFormatter, rot / fontsize are not retained + labels = ax.get_xticklabels() + else: + labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True) + + for label in labels: + if xlabelsize is not None: + self.assertAlmostEqual(label.get_fontsize(), xlabelsize) + if xrot is not None: + self.assertAlmostEqual(label.get_rotation(), xrot) if ylabelsize or yrot: - ytick = ax.get_yticklabels()[0] - if ylabelsize is not None: - self.assertAlmostEqual(ytick.get_fontsize(), ylabelsize) - if yrot is not None: - self.assertAlmostEqual(ytick.get_rotation(), yrot) + if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter): + labels = ax.get_yticklabels() + else: + labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True) + + for label in labels: + if ylabelsize is not None: + 
self.assertAlmostEqual(label.get_fontsize(), ylabelsize) + if yrot is not None: + self.assertAlmostEqual(label.get_rotation(), yrot) def _check_ax_scales(self, axes, xaxis='linear', yaxis='linear'): """ @@ -872,6 +884,7 @@ def test_plot(self): self._check_visible(ax.xaxis) self._check_visible(ax.get_xticklabels()) self._check_visible([ax.xaxis.get_label()]) + self._check_ticks_props(ax, xrot=30) _check_plot_works(df.plot, title='blah') @@ -1069,14 +1082,16 @@ def test_subplots_timeseries(self): self._check_visible(axes[-1].get_xticklabels(minor=True)) self._check_visible(axes[-1].xaxis.get_label()) self._check_visible(axes[-1].get_yticklabels()) + self._check_ticks_props(axes, xrot=30) - axes = df.plot(kind=kind, subplots=True, sharex=False) + axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7) for ax in axes: self._check_visible(ax.xaxis) self._check_visible(ax.get_xticklabels()) self._check_visible(ax.get_xticklabels(minor=True)) self._check_visible(ax.xaxis.get_label()) self._check_visible(ax.get_yticklabels()) + self._check_ticks_props(ax, xlabelsize=7, xrot=45) def test_negative_log(self): df = - DataFrame(rand(6, 4), @@ -1363,7 +1378,17 @@ def test_plot_bar(self): _check_plot_works(df.plot, kind='bar') df = DataFrame({'a': [0, 1], 'b': [1, 0]}) - _check_plot_works(df.plot, kind='bar') + ax = _check_plot_works(df.plot, kind='bar') + self._check_ticks_props(ax, xrot=90) + + ax = df.plot(kind='bar', rot=35, fontsize=10) + self._check_ticks_props(ax, xrot=35, xlabelsize=10) + + ax = _check_plot_works(df.plot, kind='barh') + self._check_ticks_props(ax, yrot=0) + + ax = df.plot(kind='barh', rot=55, fontsize=11) + self._check_ticks_props(ax, yrot=55, ylabelsize=11) def _check_bar_alignment(self, df, kind='bar', stacked=False, subplots=False, align='center', @@ -1591,6 +1616,10 @@ def test_kde(self): ax = _check_plot_works(df.plot, kind='kde') expected = [com.pprint_thing(c) for c in df.columns] self._check_legend_labels(ax, labels=expected) + 
self._check_ticks_props(ax, xrot=0) + + ax = df.plot(kind='kde', rot=20, fontsize=5) + self._check_ticks_props(ax, xrot=20, xlabelsize=5) axes = _check_plot_works(df.plot, kind='kde', subplots=True) self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index ea7f963f79f28..40d848a48d103 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -753,6 +753,7 @@ class MPLPlot(object): """ _default_rot = 0 + orientation = None _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog', 'mark_right', 'stacked'] @@ -788,7 +789,14 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True, self.use_index = use_index self.fontsize = fontsize - self.rot = rot + + if rot is not None: + self.rot = rot + else: + if isinstance(self._default_rot, dict): + self.rot = self._default_rot[self.kind] + else: + self.rot = self._default_rot if grid is None: grid = False if secondary_y else True @@ -1018,14 +1026,30 @@ def _adorn_subplots(self): else: self.axes[0].set_title(self.title) - if self._need_to_set_index: - labels = [com.pprint_thing(key) for key in self.data.index] - labels = dict(zip(range(len(self.data.index)), labels)) + labels = [com.pprint_thing(key) for key in self.data.index] + labels = dict(zip(range(len(self.data.index)), labels)) - for ax_ in self.axes: - # ax_.set_xticks(self.xticks) - xticklabels = [labels.get(x, '') for x in ax_.get_xticks()] - ax_.set_xticklabels(xticklabels, rotation=self.rot) + for ax in self.axes: + if self.orientation == 'vertical' or self.orientation is None: + if self._need_to_set_index: + xticklabels = [labels.get(x, '') for x in ax.get_xticks()] + ax.set_xticklabels(xticklabels) + self._apply_axis_properties(ax.xaxis, rot=self.rot, + fontsize=self.fontsize) + elif self.orientation == 'horizontal': + if self._need_to_set_index: + yticklabels = [labels.get(y, '') for y in ax.get_yticks()] + ax.set_yticklabels(yticklabels) + 
self._apply_axis_properties(ax.yaxis, rot=self.rot, + fontsize=self.fontsize) + + def _apply_axis_properties(self, axis, rot=None, fontsize=None): + labels = axis.get_majorticklabels() + axis.get_minorticklabels() + for label in labels: + if rot is not None: + label.set_rotation(rot) + if fontsize is not None: + label.set_fontsize(fontsize) @property def legend_title(self): @@ -1336,6 +1360,8 @@ def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True): class KdePlot(MPLPlot): + orientation = 'vertical' + def __init__(self, data, bw_method=None, ind=None, **kwargs): MPLPlot.__init__(self, data, **kwargs) self.bw_method=bw_method @@ -1480,6 +1506,9 @@ def _post_plot_logic(self): class LinePlot(MPLPlot): + _default_rot = 30 + orientation = 'vertical' + def __init__(self, data, **kwargs): MPLPlot.__init__(self, data, **kwargs) if self.stacked: @@ -1657,16 +1686,9 @@ def _post_plot_logic(self): index_name = self._get_index_name() - rot = 30 - if self.rot is not None: - rot = self.rot - for ax in self.axes: if condition: - format_date_labels(ax, rot=rot) - elif self.rot is not None: - for l in ax.get_xticklabels(): - l.set_rotation(self.rot) + format_date_labels(ax, rot=self.rot) if index_name is not None: ax.set_xlabel(index_name) @@ -1767,9 +1789,6 @@ def __init__(self, data, **kwargs): self.ax_pos = self.tick_pos - self.tickoffset def _args_adjust(self): - if self.rot is None: - self.rot = self._default_rot[self.kind] - if com.is_list_like(self.bottom): self.bottom = np.array(self.bottom) if com.is_list_like(self.left): @@ -1859,8 +1878,7 @@ def _post_plot_logic(self): if self.kind == 'bar': ax.set_xlim((s_edge, e_edge)) ax.set_xticks(self.tick_pos) - ax.set_xticklabels(str_index, rotation=self.rot, - fontsize=self.fontsize) + ax.set_xticklabels(str_index) if not self.log: # GH3254+ ax.axhline(0, color='k', linestyle='--') if name is not None: @@ -1869,14 +1887,22 @@ def _post_plot_logic(self): # horizontal bars ax.set_ylim((s_edge, e_edge)) 
ax.set_yticks(self.tick_pos) - ax.set_yticklabels(str_index, rotation=self.rot, - fontsize=self.fontsize) + ax.set_yticklabels(str_index) ax.axvline(0, color='k', linestyle='--') if name is not None: ax.set_ylabel(name) else: raise NotImplementedError(self.kind) + @property + def orientation(self): + if self.kind == 'bar': + return 'vertical' + elif self.kind == 'barh': + return 'horizontal' + else: + raise NotImplementedError(self.kind) + class PiePlot(MPLPlot):
In some plots, `rot` and `fontsize` arguments are not applied properly. - timeseries line / area plot: `rot` is not applied to minor ticklabels, and `fontsize` is completely ignored. (Fixed to apply to xticklabels) - kde plot: `rot` and `fontsize` are completely ignored. (Fixed to apply to xticklabels) - scatter and hexbin plots: `rot` and `fontsize` are completely ignored. (Under confirmation) ``` import pandas as pd import numpy as np df = pd.DataFrame(np.random.randn(10, 4), index=pd.date_range(start='2014-07-01', freq='M', periods=10)) df.plot(rot=80, fontsize=15) ``` ### Current Result ![figure_ng](https://cloud.githubusercontent.com/assets/1696302/3709379/7841977e-1454-11e4-8396-fc42e943d54a.png) ### After Fix ![figure_ok](https://cloud.githubusercontent.com/assets/1696302/3709380/7cc1037a-1454-11e4-9456-a9335907473b.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/7844
2014-07-25T23:37:18Z
2014-07-28T16:00:10Z
2014-07-28T16:00:10Z
2014-08-30T21:40:48Z
BUG: PeriodIndex.unique results in Int64Index
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 7e0931ca1b745..0b7287ed69c56 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -274,6 +274,8 @@ Bug Fixes - Bug in ``is_superperiod`` and ``is_subperiod`` cannot handle higher frequencies than ``S`` (:issue:`7760`, :issue:`7772`, :issue:`7803`) +- Bug in ``PeriodIndex.unique`` returns int64 ``np.ndarray`` (:issue:`7540`) + - Bug in ``DataFrame.reset_index`` which has ``MultiIndex`` contains ``PeriodIndex`` or ``DatetimeIndex`` with tz raises ``ValueError`` (:issue:`7746`, :issue:`7793`) diff --git a/pandas/core/base.py b/pandas/core/base.py index d55196b56c784..beffbfb2923db 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -552,3 +552,17 @@ def __sub__(self, other): def _add_delta(self, other): return NotImplemented + + def unique(self): + """ + Index.unique with handling for DatetimeIndex/PeriodIndex metadata + + Returns + ------- + result : DatetimeIndex or PeriodIndex + """ + from pandas.core.index import Int64Index + result = Int64Index.unique(self) + return self._simple_new(result, name=self.name, freq=self.freq, + tz=getattr(self, 'tz', None)) + diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 494c0ee6b2bec..9acb1804a7ef0 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -250,11 +250,13 @@ def test_value_counts_unique_nunique(self): expected_s = Series(range(10, 0, -1), index=values[::-1], dtype='int64') tm.assert_series_equal(o.value_counts(), expected_s) - if isinstance(o, DatetimeIndex): - # DatetimeIndex.unique returns DatetimeIndex - self.assertTrue(o.unique().equals(klass(values))) - else: - self.assert_numpy_array_equal(o.unique(), values) + result = o.unique() + if isinstance(o, (DatetimeIndex, PeriodIndex)): + self.assertTrue(isinstance(result, o.__class__)) + self.assertEqual(result.name, o.name) + self.assertEqual(result.freq, o.freq) + + self.assert_numpy_array_equal(result, values) 
self.assertEqual(o.nunique(), len(np.unique(o.values))) @@ -263,17 +265,13 @@ def test_value_counts_unique_nunique(self): klass = type(o) values = o.values - if o.values.dtype == 'int64': - # skips int64 because it doesn't allow to include nan or None - continue - if ((isinstance(o, Int64Index) and not isinstance(o, (DatetimeIndex, PeriodIndex)))): # skips int64 because it doesn't allow to include nan or None continue # special assign to the numpy array - if o.values.dtype == 'datetime64[ns]': + if o.values.dtype == 'datetime64[ns]' or isinstance(o, PeriodIndex): values[0:2] = pd.tslib.iNaT else: values[0:2] = null_obj @@ -294,8 +292,8 @@ def test_value_counts_unique_nunique(self): result = o.unique() self.assert_numpy_array_equal(result[1:], values[2:]) - if isinstance(o, DatetimeIndex): - self.assertTrue(result[0] is pd.NaT) + if isinstance(o, (DatetimeIndex, PeriodIndex)): + self.assertTrue(result.asi8[0] == pd.tslib.iNaT) else: self.assertTrue(pd.isnull(result[0])) @@ -706,7 +704,7 @@ def test_sub_isub(self): rng -= 1 tm.assert_index_equal(rng, expected) - def test_value_counts(self): + def test_value_counts_unique(self): # GH 7735 for tz in [None, 'UTC', 'Asia/Tokyo', 'US/Eastern']: idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10) @@ -717,6 +715,9 @@ def test_value_counts(self): expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') tm.assert_series_equal(idx.value_counts(), expected) + expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, tz=tz) + tm.assert_index_equal(idx.unique(), expected) + idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz) @@ -728,6 +729,8 @@ def test_value_counts(self): expected = Series([3, 2, 1], index=exp_idx) tm.assert_series_equal(idx.value_counts(dropna=False), expected) + tm.assert_index_equal(idx.unique(), exp_idx) + class TestPeriodIndexOps(Ops): _allowed = '_allow_period_index_ops' @@ -987,7 +990,7 @@ def 
test_sub_isub(self): rng -= 1 tm.assert_index_equal(rng, expected) - def test_value_counts(self): + def test_value_counts_unique(self): # GH 7735 idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10) # create repeated values, 'n'th element is repeated by n+1 times @@ -1000,6 +1003,9 @@ def test_value_counts(self): expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') tm.assert_series_equal(idx.value_counts(), expected) + expected = pd.period_range('2011-01-01 09:00', freq='H', periods=10) + tm.assert_index_equal(idx.unique(), expected) + idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], freq='H') @@ -1011,6 +1017,8 @@ def test_value_counts(self): expected = Series([3, 2, 1], index=exp_idx) tm.assert_series_equal(idx.value_counts(dropna=False), expected) + tm.assert_index_equal(idx.unique(), exp_idx) + if __name__ == '__main__': import nose diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 2a3c53135a644..4aa424ea08031 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -848,18 +848,6 @@ def take(self, indices, axis=0): return self[maybe_slice] return super(DatetimeIndex, self).take(indices, axis) - def unique(self): - """ - Index.unique with handling for DatetimeIndex metadata - - Returns - ------- - result : DatetimeIndex - """ - result = Int64Index.unique(self) - return DatetimeIndex._simple_new(result, tz=self.tz, - name=self.name) - def union(self, other): """ Specialized union for DatetimeIndex objects. If combine
Closes #7540.
https://api.github.com/repos/pandas-dev/pandas/pulls/7843
2014-07-25T23:26:27Z
2014-07-26T14:44:41Z
2014-07-26T14:44:41Z
2014-07-27T01:11:02Z
BUG: Series.__iter__ not dealing with category type well (GH7839)
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 7e0931ca1b745..39ff807d6b1e4 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -97,7 +97,7 @@ Categoricals in Series/DataFrame ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :class:`~pandas.Categorical` can now be included in `Series` and `DataFrames` and gained new -methods to manipulate. Thanks to Jan Schultz for much of this API/implementation. (:issue:`3943`, :issue:`5313`, :issue:`5314`, :issue:`7444`). +methods to manipulate. Thanks to Jan Schultz for much of this API/implementation. (:issue:`3943`, :issue:`5313`, :issue:`5314`, :issue:`7444`, :issue:`7839`). For full docs, see the :ref:`Categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>`. diff --git a/pandas/core/series.py b/pandas/core/series.py index 502c01ce6d1d1..c0e1e8a13eea3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -973,7 +973,9 @@ def _get_repr( return result def __iter__(self): - if np.issubdtype(self.dtype, np.datetime64): + if com.is_categorical_dtype(self.dtype): + return iter(self.values) + elif np.issubdtype(self.dtype, np.datetime64): return (lib.Timestamp(x) for x in self.values) else: return iter(self.values) diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 0aa7f2b67c7c6..b70e50eb3d030 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -647,6 +647,27 @@ def test_nan_handling(self): np.array(["a","b",np.nan], dtype=np.object_)) self.assert_numpy_array_equal(s3.cat._codes, np.array([0,1,2,0])) + def test_sequence_like(self): + + # GH 7839 + # make sure can iterate + df = DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']}) + df['grade'] = Categorical(df['raw_grade']) + + # basic sequencing testing + result = list(df.grade.cat) + expected = np.array(df.grade.cat).tolist() + tm.assert_almost_equal(result,expected) + + # iteration + for t in df.itertuples(index=False): 
+ str(t) + + for row, s in df.iterrows(): + str(s) + + for c, col in df.iteritems(): + str(s) def test_series_delegations(self):
closes #7839
https://api.github.com/repos/pandas-dev/pandas/pulls/7842
2014-07-25T22:41:05Z
2014-07-25T23:30:04Z
2014-07-25T23:30:04Z
2014-07-25T23:30:04Z
TST/BUG: html tests not skipping properly if lxml is not installed
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py index 326b7bc004564..a7540fc716e1f 100644 --- a/pandas/io/tests/test_html.py +++ b/pandas/io/tests/test_html.py @@ -595,18 +595,28 @@ def _lang_enc(filename): class TestReadHtmlEncoding(tm.TestCase): files = glob.glob(os.path.join(DATA_PATH, 'html_encoding', '*.html')) + flavor = 'bs4' + + @classmethod + def setUpClass(cls): + super(TestReadHtmlEncoding, cls).setUpClass() + _skip_if_none_of((cls.flavor, 'html5lib')) + + def read_html(self, *args, **kwargs): + kwargs['flavor'] = self.flavor + return read_html(*args, **kwargs) def read_filename(self, f, encoding): - return read_html(f, encoding=encoding, index_col=0) + return self.read_html(f, encoding=encoding, index_col=0) def read_file_like(self, f, encoding): with open(f, 'rb') as fobj: - return read_html(BytesIO(fobj.read()), encoding=encoding, - index_col=0) + return self.read_html(BytesIO(fobj.read()), encoding=encoding, + index_col=0) def read_string(self, f, encoding): with open(f, 'rb') as fobj: - return read_html(fobj.read(), encoding=encoding, index_col=0) + return self.read_html(fobj.read(), encoding=encoding, index_col=0) def test_encode(self): for f in self.files: @@ -618,6 +628,15 @@ def test_encode(self): tm.assert_frame_equal(from_string, from_filename) +class TestReadHtmlEncodingLxml(TestReadHtmlEncoding): + flavor = 'lxml' + + @classmethod + def setUpClass(cls): + super(TestReadHtmlEncodingLxml, cls).setUpClass() + _skip_if_no(cls.flavor) + + class TestReadHtmlLxml(tm.TestCase): @classmethod def setUpClass(cls):
null
https://api.github.com/repos/pandas-dev/pandas/pulls/7836
2014-07-24T19:25:07Z
2014-07-24T21:25:40Z
2014-07-24T21:25:40Z
2014-07-24T21:25:42Z
ENH: PeriodIndex can accept freq with mult
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 4394981abb8c3..29b955a55fcc9 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -591,7 +591,7 @@ various docstrings for the classes. These operations (``apply``, ``rollforward`` and ``rollback``) preserves time (hour, minute, etc) information by default. To reset time, use ``normalize=True`` keyword when creating the offset instance. If ``normalize=True``, result is normalized after the function is applied. - .. ipython:: python +.. ipython:: python day = Day() day.apply(Timestamp('2014-01-01 09:00')) @@ -1257,8 +1257,10 @@ be created with the convenience function ``period_range``. Period ~~~~~~ + A ``Period`` represents a span of time (e.g., a day, a month, a quarter, etc). -It can be created using a frequency alias: +You can specify the span via ``freq`` keyword using a frequency alias like below. +Because ``freq`` represents a span of ``Period``, it cannot be negative like "-3D". .. ipython:: python @@ -1268,11 +1270,10 @@ It can be created using a frequency alias: Period('2012-1-1 19:00', freq='H') -Unlike time stamped data, pandas does not support frequencies at multiples of -DateOffsets (e.g., '3Min') for periods. + Period('2012-1-1 19:00', freq='5H') Adding and subtracting integers from periods shifts the period by its own -frequency. +frequency. Arithmetic is not allowed between ``Period`` with different ``freq`` (span). .. ipython:: python @@ -1282,6 +1283,15 @@ frequency. p - 3 + p = Period('2012-01', freq='2M') + + p + 2 + + p - 1 + + p == Period('2012-01', freq='3M') + + If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``), ``offsets`` and ``timedelta``-like can be added if the result can have the same freq. Otherise, ``ValueError`` will be raised. .. 
ipython:: python @@ -1335,6 +1345,13 @@ The ``PeriodIndex`` constructor can also be used directly: PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M') +Passing multiplied frequency outputs a sequence of ``Period`` which +has multiplied span. + +.. ipython:: python + + PeriodIndex(start='2014-01', freq='3M', periods=4) + Just like ``DatetimeIndex``, a ``PeriodIndex`` can also be used to index pandas objects: diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index e9d39e0441055..8e34ea8f81a67 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -109,6 +109,32 @@ We are now supporting a ``Series.dt.strftime`` method for datetime-likes to gene The string format is as the python standard library and details can be found `here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_ +.. _whatsnew_0170.periodfreq: + +Period Frequency Enhancement +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``Period``, ``PeriodIndex`` and ``period_range`` can now accept multiplied freq. Also, ``Period.freq`` and ``PeriodIndex.freq`` are now stored as ``DateOffset`` instance like ``DatetimeIndex``, not ``str`` (:issue:`7811`) + +Multiplied freq represents a span of corresponding length. Below example creates a period of 3 days. Addition and subtraction will shift the period by its span. + +.. ipython:: python + + p = pd.Period('2015-08-01', freq='3D') + p + p + 1 + p - 2 + p.to_timestamp() + p.to_timestamp(how='E') + +You can use multiplied freq in ``PeriodIndex`` and ``period_range``. + +.. ipython:: python + + idx = pd.period_range('2015-08-01', periods=4, freq='2D') + idx + idx + 1 + .. _whatsnew_0170.enhancements.sas_xport: Support for SAS XPORT files @@ -183,7 +209,6 @@ Other enhancements - ``pandas.tseries.offsets`` larger than the ``Day`` offset can now be used with with ``Series`` for addition/subtraction (:issue:`10699`). See the :ref:`Documentation <timeseries.offsetseries>` for more details. 
- ``.as_blocks`` will now take a ``copy`` optional argument to return a copy of the data, default is to copy (no change in behavior from prior versions), (:issue:`9607`) - - ``regex`` argument to ``DataFrame.filter`` now handles numeric column names instead of raising ``ValueError`` (:issue:`10384`). - ``pd.read_stata`` will now read Stata 118 type files. (:issue:`9882`) diff --git a/pandas/io/tests/data/legacy_msgpack/0.16.0/0.16.0_x86_64_darwin_2.7.9.msgpack b/pandas/io/tests/data/legacy_msgpack/0.16.0/0.16.0_x86_64_darwin_2.7.9.msgpack new file mode 100644 index 0000000000000..554f8a6e0742a Binary files /dev/null and b/pandas/io/tests/data/legacy_msgpack/0.16.0/0.16.0_x86_64_darwin_2.7.9.msgpack differ diff --git a/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.9.msgpack b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.9.msgpack new file mode 100644 index 0000000000000..000879f4cb2c2 Binary files /dev/null and b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.9.msgpack differ diff --git a/pandas/io/tests/data/legacy_pickle/0.16.0/0.16.0_x86_64_darwin_2.7.9.pickle b/pandas/io/tests/data/legacy_pickle/0.16.0/0.16.0_x86_64_darwin_2.7.9.pickle new file mode 100644 index 0000000000000..d45936baa1e00 Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.16.0/0.16.0_x86_64_darwin_2.7.9.pickle differ diff --git a/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.9.pickle b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.9.pickle new file mode 100644 index 0000000000000..d45936baa1e00 Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.9.pickle differ diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index 8f2079722c00e..1ade6ac0f8068 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -17,6 +17,8 @@ from pandas.compat import u from pandas.util.misc import 
is_little_endian import pandas +from pandas.tseries.offsets import Day, MonthEnd + class TestPickle(): """ @@ -90,6 +92,10 @@ def read_pickles(self, version): if 'ts' in data['series']: self._validate_timeseries(data['series']['ts'], self.data['series']['ts']) self._validate_frequency(data['series']['ts']) + if 'index' in data: + if 'period' in data['index']: + self._validate_periodindex(data['index']['period'], + self.data['index']['period']) n += 1 assert n > 0, 'Pickle files are not tested' @@ -162,7 +168,6 @@ def _validate_timeseries(self, pickled, current): def _validate_frequency(self, pickled): # GH 9291 - from pandas.tseries.offsets import Day freq = pickled.index.freq result = freq + Day(1) tm.assert_equal(result, Day(2)) @@ -175,6 +180,13 @@ def _validate_frequency(self, pickled): tm.assert_equal(isinstance(result, pandas.Timedelta), True) tm.assert_equal(result, pandas.Timedelta(days=1, nanoseconds=1)) + def _validate_periodindex(self, pickled, current): + tm.assert_index_equal(pickled, current) + tm.assertIsInstance(pickled.freq, MonthEnd) + tm.assert_equal(pickled.freq, MonthEnd()) + tm.assert_equal(pickled.freqstr, 'M') + tm.assert_index_equal(pickled.shift(2), current.shift(2)) + if __name__ == '__main__': import nose diff --git a/pandas/src/period.pyx b/pandas/src/period.pyx index 619d1a87a71e0..1dbf469a946b5 100644 --- a/pandas/src/period.pyx +++ b/pandas/src/period.pyx @@ -615,6 +615,9 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, return result +_DIFFERENT_FREQ_ERROR = "Input has different freq={1} from Period(freq={0})" + + cdef class Period(object): """ Represents an period of time @@ -624,8 +627,7 @@ cdef class Period(object): value : Period or compat.string_types, default None The time period represented (e.g., '4Q2005') freq : str, default None - e.g., 'B' for businessday. Must be a singular rule-code (e.g. 5T is not - allowed). 
+ One of pandas period strings or corresponding objects year : int, default None month : int, default 1 quarter : int, default None @@ -641,12 +643,33 @@ cdef class Period(object): _comparables = ['name','freqstr'] _typ = 'period' + @classmethod + def _maybe_convert_freq(cls, object freq): + + if isinstance(freq, compat.string_types): + from pandas.tseries.frequencies import _period_alias_dict + freq = _period_alias_dict.get(freq, freq) + elif isinstance(freq, (int, tuple)): + from pandas.tseries.frequencies import get_freq_code as _gfc + from pandas.tseries.frequencies import _get_freq_str + code, stride = _gfc(freq) + freq = _get_freq_str(code, stride) + + from pandas.tseries.frequencies import to_offset + freq = to_offset(freq) + + if freq.n <= 0: + raise ValueError('Frequency must be positive, because it' + ' represents span: {0}'.format(freq.freqstr)) + + return freq + @classmethod def _from_ordinal(cls, ordinal, freq): """ fast creation from an ordinal and freq that are already validated! """ self = Period.__new__(cls) self.ordinal = ordinal - self.freq = freq + self.freq = cls._maybe_convert_freq(freq) return self def __init__(self, value=None, freq=None, ordinal=None, @@ -659,8 +682,6 @@ cdef class Period(object): # periods such as A, Q, etc. 
Every five minutes would be, e.g., # ('T', 5) but may be passed in as a string like '5T' - self.freq = None - # ordinal is the period offset from the gregorian proleptic epoch if ordinal is not None and value is not None: @@ -675,9 +696,8 @@ cdef class Period(object): elif value is None: if freq is None: raise ValueError("If value is None, freq cannot be None") - ordinal = _ordinal_from_fields(year, month, quarter, day, - hour, minute, second, freq) + hour, minute, second, freq) elif isinstance(value, Period): other = value @@ -698,8 +718,8 @@ cdef class Period(object): if lib.is_integer(value): value = str(value) value = value.upper() - dt, _, reso = parse_time_string(value, freq) + if freq is None: try: freq = frequencies.Resolution.get_freq(reso) @@ -723,24 +743,22 @@ cdef class Period(object): raise ValueError(msg) base, mult = _gfc(freq) - if mult != 1: - # TODO: Better error message - this is slightly confusing - raise ValueError('Only mult == 1 supported') if ordinal is None: self.ordinal = get_period_ordinal(dt.year, dt.month, dt.day, - dt.hour, dt.minute, dt.second, dt.microsecond, 0, - base) + dt.hour, dt.minute, dt.second, + dt.microsecond, 0, base) else: self.ordinal = ordinal - self.freq = frequencies._get_freq_str(base) + self.freq = self._maybe_convert_freq(freq) def __richcmp__(self, other, op): if isinstance(other, Period): from pandas.tseries.frequencies import get_freq_code as _gfc if other.freq != self.freq: - raise ValueError("Cannot compare non-conforming periods") + msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, other.freqstr) + raise ValueError(msg) if self.ordinal == tslib.iNaT or other.ordinal == tslib.iNaT: return _nat_scalar_rules[op] return PyObject_RichCompareBool(self.ordinal, other.ordinal, op) @@ -758,7 +776,7 @@ cdef class Period(object): def _add_delta(self, other): from pandas.tseries import frequencies if isinstance(other, (timedelta, np.timedelta64, offsets.Tick, Timedelta)): - offset = frequencies.to_offset(self.freq) + 
offset = frequencies.to_offset(self.freq.rule_code) if isinstance(offset, offsets.Tick): nanos = tslib._delta_to_nanoseconds(other) offset_nanos = tslib._delta_to_nanoseconds(offset) @@ -769,18 +787,21 @@ cdef class Period(object): else: ordinal = self.ordinal + (nanos // offset_nanos) return Period(ordinal=ordinal, freq=self.freq) + msg = 'Input cannnot be converted to Period(freq={0})' + raise ValueError(msg) elif isinstance(other, offsets.DateOffset): freqstr = frequencies.get_standard_freq(other) base = frequencies.get_base_alias(freqstr) - - if base == self.freq: + if base == self.freq.rule_code: if self.ordinal == tslib.iNaT: ordinal = self.ordinal else: ordinal = self.ordinal + other.n return Period(ordinal=ordinal, freq=self.freq) - - raise ValueError("Input has different freq from Period(freq={0})".format(self.freq)) + msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, other.freqstr) + raise ValueError(msg) + else: # pragma no cover + return NotImplemented def __add__(self, other): if isinstance(other, (timedelta, np.timedelta64, @@ -790,7 +811,7 @@ cdef class Period(object): if self.ordinal == tslib.iNaT: ordinal = self.ordinal else: - ordinal = self.ordinal + other + ordinal = self.ordinal + other * self.freq.n return Period(ordinal=ordinal, freq=self.freq) else: # pragma: no cover return NotImplemented @@ -804,7 +825,7 @@ cdef class Period(object): if self.ordinal == tslib.iNaT: ordinal = self.ordinal else: - ordinal = self.ordinal - other + ordinal = self.ordinal - other * self.freq.n return Period(ordinal=ordinal, freq=self.freq) elif isinstance(other, Period): if other.freq != self.freq: @@ -836,13 +857,18 @@ cdef class Period(object): base1, mult1 = _gfc(self.freq) base2, mult2 = _gfc(freq) - if mult2 != 1: - raise ValueError('Only mult == 1 supported') - - end = how == 'E' - new_ordinal = period_asfreq(self.ordinal, base1, base2, end) + if self.ordinal == tslib.iNaT: + ordinal = self.ordinal + else: + # mult1 can't be negative or 0 + end = how == 'E' 
+ if end: + ordinal = self.ordinal + mult1 - 1 + else: + ordinal = self.ordinal + ordinal = period_asfreq(ordinal, base1, base2, end) - return Period(ordinal=new_ordinal, freq=base2) + return Period(ordinal=ordinal, freq=freq) @property def start_time(self): @@ -853,7 +879,8 @@ cdef class Period(object): if self.ordinal == tslib.iNaT: ordinal = self.ordinal else: - ordinal = (self + 1).start_time.value - 1 + # freq.n can't be negative or 0 + ordinal = (self + self.freq.n).start_time.value - 1 return Timestamp(ordinal) def to_timestamp(self, freq=None, how='start', tz=None): @@ -947,14 +974,15 @@ cdef class Period(object): def __str__(self): return self.__unicode__() + @property + def freqstr(self): + return self.freq.freqstr + def __repr__(self): - from pandas.tseries import frequencies from pandas.tseries.frequencies import get_freq_code as _gfc base, mult = _gfc(self.freq) formatted = period_format(self.ordinal, base) - freqstr = frequencies._reverse_period_code_map[base] - - return "Period('%s', '%s')" % (formatted, freqstr) + return "Period('%s', '%s')" % (formatted, self.freqstr) def __unicode__(self): """ @@ -1123,9 +1151,6 @@ def _ordinal_from_fields(year, month, quarter, day, hour, minute, second, freq): from pandas.tseries.frequencies import get_freq_code as _gfc base, mult = _gfc(freq) - if mult != 1: - raise ValueError('Only mult == 1 supported') - if quarter is not None: year, month = _quarter_to_myear(year, quarter, freq) diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py index 96c3883f7cbf3..912a0c3f88405 100644 --- a/pandas/tseries/base.py +++ b/pandas/tseries/base.py @@ -13,7 +13,7 @@ import pandas.lib as lib from pandas.core.index import Index from pandas.util.decorators import Appender, cache_readonly -from pandas.tseries.frequencies import infer_freq, to_offset, Resolution +import pandas.tseries.frequencies as frequencies import pandas.algos as _algos @@ -136,7 +136,7 @@ def inferred_freq(self): frequency. 
""" try: - return infer_freq(self) + return frequencies.infer_freq(self) except ValueError: return None @@ -260,7 +260,7 @@ def min(self, axis=None): if self.hasnans: mask = i8 == tslib.iNaT - min_stamp = self[~mask].asi8.min() + min_stamp = i8[~mask].min() else: min_stamp = i8.min() return self._box_func(min_stamp) @@ -303,7 +303,7 @@ def max(self, axis=None): if self.hasnans: mask = i8 == tslib.iNaT - max_stamp = self[~mask].asi8.max() + max_stamp = i8[~mask].max() else: max_stamp = i8.max() return self._box_func(max_stamp) @@ -352,15 +352,14 @@ def _format_attrs(self): @cache_readonly def _resolution(self): - from pandas.tseries.frequencies import Resolution - return Resolution.get_reso_from_freq(self.freqstr) + return frequencies.Resolution.get_reso_from_freq(self.freqstr) @cache_readonly def resolution(self): """ Returns day, hour, minute, second, millisecond or microsecond """ - return Resolution.get_str(self._resolution) + return frequencies.Resolution.get_str(self._resolution) def _convert_scalar_indexer(self, key, kind=None): """ @@ -509,7 +508,7 @@ def shift(self, n, freq=None): """ if freq is not None and freq != self.freq: if isinstance(freq, compat.string_types): - freq = to_offset(freq) + freq = frequencies.to_offset(freq) result = Index.shift(self, n, freq) if hasattr(self,'tz'): diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 7e5c3af43c861..9349e440eb9e9 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -175,7 +175,7 @@ def get_to_timestamp_base(base): def get_freq_group(freq): """ - Return frequency code group of given frequency str. + Return frequency code group of given frequency str or offset. 
Example ------- @@ -185,9 +185,16 @@ def get_freq_group(freq): >>> get_freq_group('W-FRI') 4000 """ + if isinstance(freq, offsets.DateOffset): + freq = freq.rule_code + if isinstance(freq, compat.string_types): base, mult = get_freq_code(freq) freq = base + elif isinstance(freq, int): + pass + else: + raise ValueError('input must be str, offset or int') return (freq // 1000) * 1000 @@ -592,7 +599,7 @@ def get_standard_freq(freq): return None if isinstance(freq, DateOffset): - return get_offset_name(freq) + return freq.rule_code code, stride = get_freq_code(freq) return _get_freq_str(code, stride) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index ec416efe1079f..fb6929c77f6b0 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -444,7 +444,10 @@ def _beg_apply_index(self, i, freq): """Offsets index to beginning of Period frequency""" off = i.to_perioddelta('D') - base_period = i.to_period(freq) + + from pandas.tseries.frequencies import get_freq_code + base, mult = get_freq_code(freq) + base_period = i.to_period(base) if self.n < 0: # when subtracting, dates on start roll to prior roll = np.where(base_period.to_timestamp() == i - off, @@ -459,7 +462,11 @@ def _end_apply_index(self, i, freq): """Offsets index to end of Period frequency""" off = i.to_perioddelta('D') - base_period = i.to_period(freq) + + import pandas.tseries.frequencies as frequencies + from pandas.tseries.frequencies import get_freq_code + base, mult = get_freq_code(freq) + base_period = i.to_period(base) if self.n > 0: # when adding, dtates on end roll to next roll = np.where(base_period.to_timestamp(how='end') == i - off, diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 56d7d45120fdc..832791fc6933c 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -56,6 +56,8 @@ def dt64arr_to_periodarr(data, freq, tz): # --- Period index sketch +_DIFFERENT_FREQ_ERROR = "Input has different freq={1} from PeriodIndex(freq={0})" + 
def _period_index_cmp(opname, nat_result=False): """ Wrap comparison operations to convert datetime-like to datetime64 @@ -63,13 +65,16 @@ def _period_index_cmp(opname, nat_result=False): def wrapper(self, other): if isinstance(other, Period): func = getattr(self.values, opname) + other_base, _ = _gfc(other.freq) if other.freq != self.freq: - raise AssertionError("Frequencies must be equal") + msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, other.freqstr) + raise ValueError(msg) result = func(other.ordinal) elif isinstance(other, PeriodIndex): if other.freq != self.freq: - raise AssertionError("Frequencies must be equal") + msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, other.freqstr) + raise ValueError(msg) result = getattr(self.values, opname)(other.values) @@ -162,8 +167,6 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index): def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None, periods=None, copy=False, name=None, tz=None, **kwargs): - freq = frequencies.get_standard_freq(freq) - if periods is not None: if is_float(periods): periods = int(periods) @@ -237,8 +240,8 @@ def _from_arraylike(cls, data, freq, tz): else: base1, _ = _gfc(data.freq) base2, _ = _gfc(freq) - data = period.period_asfreq_arr(data.values, base1, - base2, 1) + data = period.period_asfreq_arr(data.values, + base1, base2, 1) else: if freq is None and len(data) > 0: freq = getattr(data[0], 'freq', None) @@ -269,11 +272,9 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs): result = object.__new__(cls) result._data = values result.name = name - if freq is None: - raise ValueError('freq not specified') - result.freq = freq - + raise ValueError('freq is not specified') + result.freq = Period._maybe_convert_freq(freq) result._reset_identity() return result @@ -352,7 +353,8 @@ def astype(self, dtype): def searchsorted(self, key, side='left'): if isinstance(key, Period): if key.freq != self.freq: - raise ValueError("Different period frequency: %s" % 
key.freq) + msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, key.freqstr) + raise ValueError(msg) key = key.ordinal elif isinstance(key, compat.string_types): key = Period(key, freq=self.freq).ordinal @@ -375,10 +377,6 @@ def is_full(self): values = self.values return ((values[1:] - values[:-1]) < 2).all() - @property - def freqstr(self): - return self.freq - def asfreq(self, freq=None, how='E'): """ Convert the PeriodIndex to the specified frequency `freq`. @@ -425,11 +423,20 @@ def asfreq(self, freq=None, how='E'): base1, mult1 = _gfc(self.freq) base2, mult2 = _gfc(freq) - if mult2 != 1: - raise ValueError('Only mult == 1 supported') - + asi8 = self.asi8 + # mult1 can't be negative or 0 end = how == 'E' - new_data = period.period_asfreq_arr(self.values, base1, base2, end) + if end: + ordinal = asi8 + mult1 - 1 + else: + ordinal = asi8 + + new_data = period.period_asfreq_arr(ordinal, base1, base2, end) + + if self.hasnans: + mask = asi8 == tslib.iNaT + new_data[mask] = tslib.iNaT + return self._simple_new(new_data, self.name, freq=freq) def to_datetime(self, dayfirst=False): @@ -504,7 +511,7 @@ def to_timestamp(self, freq=None, how='start'): def _maybe_convert_timedelta(self, other): if isinstance(other, (timedelta, np.timedelta64, offsets.Tick, Timedelta)): - offset = frequencies.to_offset(self.freq) + offset = frequencies.to_offset(self.freq.rule_code) if isinstance(offset, offsets.Tick): nanos = tslib._delta_to_nanoseconds(other) offset_nanos = tslib._delta_to_nanoseconds(offset) @@ -513,8 +520,7 @@ def _maybe_convert_timedelta(self, other): elif isinstance(other, offsets.DateOffset): freqstr = frequencies.get_standard_freq(other) base = frequencies.get_base_alias(freqstr) - - if base == self.freq: + if base == self.freq.rule_code: return other.n raise ValueError("Input has different freq from PeriodIndex(freq={0})".format(self.freq)) @@ -536,7 +542,7 @@ def shift(self, n): shifted : PeriodIndex """ mask = self.values == tslib.iNaT - values = self.values + n + 
values = self.values + n * self.freq.n values[mask] = tslib.iNaT return PeriodIndex(data=values, name=self.name, freq=self.freq) @@ -616,7 +622,7 @@ def get_loc(self, key, method=None, tolerance=None): except TypeError: pass - key = Period(key, self.freq) + key = Period(key, freq=self.freq) try: return Index.get_loc(self, key.ordinal, method, tolerance) except KeyError: @@ -688,7 +694,6 @@ def _get_string_slice(self, key): 'ordered time series') key, parsed, reso = parse_time_string(key, self.freq) - grp = frequencies.Resolution.get_freq_group(reso) freqn = frequencies.get_freq_group(self.freq) if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn: @@ -723,8 +728,8 @@ def _assert_can_do_setop(self, other): raise ValueError('can only call with other PeriodIndex-ed objects') if self.freq != other.freq: - raise ValueError('Only like-indexed PeriodIndexes compatible ' - 'for join (for now)') + msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, other.freqstr) + raise ValueError(msg) def _wrap_union_result(self, other, result): name = self.name if self.name == other.name else None @@ -778,12 +783,12 @@ def __array_finalize__(self, obj): self.name = getattr(obj, 'name', None) self._reset_identity() - def take(self, indices, axis=None): + def take(self, indices, axis=0): """ Analogous to ndarray.take """ indices = com._ensure_platform_int(indices) - taken = self.values.take(indices, axis=axis) + taken = self.asi8.take(indices, axis=axis) return self._simple_new(taken, self.name, freq=self.freq) def append(self, other): @@ -850,10 +855,8 @@ def __setstate__(self, state): data = np.empty(nd_state[1], dtype=nd_state[2]) np.ndarray.__setstate__(data, nd_state) - try: # backcompat - self.freq = own_state[1] - except: - pass + # backcompat + self.freq = Period._maybe_convert_freq(own_state[1]) else: # pragma: no cover data = np.empty(state) @@ -863,6 +866,7 @@ def __setstate__(self, state): else: raise Exception("invalid pickle state") + _unpickle_compat = 
__setstate__ def tz_convert(self, tz): @@ -916,10 +920,13 @@ def tz_localize(self, tz, infer_dst=False): PeriodIndex._add_datetimelike_methods() -def _get_ordinal_range(start, end, periods, freq): +def _get_ordinal_range(start, end, periods, freq, mult=1): if com._count_not_none(start, end, periods) < 2: raise ValueError('Must specify 2 of start, end, periods') + if freq is not None: + _, mult = _gfc(freq) + if start is not None: start = Period(start, freq) if end is not None: @@ -943,15 +950,16 @@ def _get_ordinal_range(start, end, periods, freq): raise ValueError('Could not infer freq from start/end') if periods is not None: + periods = periods * mult if start is None: - data = np.arange(end.ordinal - periods + 1, - end.ordinal + 1, + data = np.arange(end.ordinal - periods + mult, + end.ordinal + 1, mult, dtype=np.int64) else: - data = np.arange(start.ordinal, start.ordinal + periods, + data = np.arange(start.ordinal, start.ordinal + periods, mult, dtype=np.int64) else: - data = np.arange(start.ordinal, end.ordinal + 1, dtype=np.int64) + data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) return data, freq @@ -975,8 +983,6 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None, base = frequencies.FreqGroup.FR_QTR else: base, mult = _gfc(freq) - if mult != 1: - raise ValueError('Only mult == 1 supported') if base != frequencies.FreqGroup.FR_QTR: raise AssertionError("base must equal FR_QTR") @@ -987,9 +993,6 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None, ordinals.append(val) else: base, mult = _gfc(freq) - if mult != 1: - raise ValueError('Only mult == 1 supported') - arrays = _make_field_arrays(year, month, day, hour, minute, second) for y, mth, d, h, mn, s in zip(*arrays): ordinals.append(period.period_ordinal(y, mth, d, h, mn, s, 0, 0, base)) diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py index 5741e9cf9c093..03c0e3f778e99 100644 --- 
a/pandas/tseries/tests/test_base.py +++ b/pandas/tseries/tests/test_base.py @@ -1535,10 +1535,10 @@ def _check_freq(index, expected_index): self.assertEqual(result.freq, 'D') def test_order(self): - idx1 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], - freq='D', name='idx') + for freq in ['D', '2D', '4D']: + idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], + freq=freq, name='idx') - for idx in [idx1]: ordered = idx.sort_values() self.assert_index_equal(ordered, idx) self.assertEqual(ordered.freq, idx.freq) @@ -1546,18 +1546,21 @@ def test_order(self): ordered = idx.sort_values(ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) - self.assertEqual(ordered.freq, 'D') + self.assertEqual(ordered.freq, expected.freq) + self.assertEqual(ordered.freq, freq) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, idx) self.assert_numpy_array_equal(indexer, np.array([0, 1, 2])) - self.assertEqual(ordered.freq, 'D') + self.assertEqual(ordered.freq, idx.freq) + self.assertEqual(ordered.freq, freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assert_numpy_array_equal(indexer, np.array([2, 1, 0])) - self.assertEqual(ordered.freq, 'D') + self.assertEqual(ordered.freq, expected.freq) + self.assertEqual(ordered.freq, freq) idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-02', '2011-01-01'], freq='D', name='idx1') @@ -1610,6 +1613,7 @@ def test_getitem(self): name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) + self.assertEqual(result.freq, 'D') result = idx[0:10:2] expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05', @@ -1617,6 +1621,7 @@ def test_getitem(self): freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) + self.assertEqual(result.freq, 'D') result = 
idx[-20:-5:3] expected = pd.PeriodIndex(['2011-01-12', '2011-01-15', '2011-01-18', @@ -1624,6 +1629,7 @@ def test_getitem(self): freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) + self.assertEqual(result.freq, 'D') result = idx[4::-1] expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03', @@ -1631,6 +1637,7 @@ def test_getitem(self): freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) + self.assertEqual(result.freq, 'D') def test_take(self): #GH 10295 @@ -1647,6 +1654,7 @@ def test_take(self): expected = pd.period_range('2011-01-01', '2011-01-03', freq='D', name='idx') self.assert_index_equal(result, expected) + self.assertEqual(result.freq, 'D') self.assertEqual(result.freq, expected.freq) result = idx.take([0, 2, 4]) @@ -1654,24 +1662,28 @@ def test_take(self): freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) + self.assertEqual(result.freq, 'D') result = idx.take([7, 4, 1]) expected = pd.PeriodIndex(['2011-01-08', '2011-01-05', '2011-01-02'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) + self.assertEqual(result.freq, 'D') result = idx.take([3, 2, 5]) expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) + self.assertEqual(result.freq, 'D') result = idx.take([-3, 2, 5]) expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) + self.assertEqual(result.freq, 'D') if __name__ == '__main__': diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py index 070363460f791..b783459cbfe95 100644 --- a/pandas/tseries/tests/test_frequencies.py +++ 
b/pandas/tseries/tests/test_frequencies.py @@ -129,9 +129,48 @@ def test_anchored_shortcuts(): expected = frequencies.to_offset('W-SUN') assert(result == expected) - result = frequencies.to_offset('Q') - expected = frequencies.to_offset('Q-DEC') - assert(result == expected) + result1 = frequencies.to_offset('Q') + result2 = frequencies.to_offset('Q-DEC') + expected = offsets.QuarterEnd(startingMonth=12) + assert(result1 == expected) + assert(result2 == expected) + + result1 = frequencies.to_offset('Q-MAY') + expected = offsets.QuarterEnd(startingMonth=5) + assert(result1 == expected) + + +def test_get_rule_month(): + result = frequencies._get_rule_month('W') + assert(result == 'DEC') + result = frequencies._get_rule_month(offsets.Week()) + assert(result == 'DEC') + + result = frequencies._get_rule_month('D') + assert(result == 'DEC') + result = frequencies._get_rule_month(offsets.Day()) + assert(result == 'DEC') + + result = frequencies._get_rule_month('Q') + assert(result == 'DEC') + result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12)) + print(result == 'DEC') + + result = frequencies._get_rule_month('Q-JAN') + assert(result == 'JAN') + result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1)) + assert(result == 'JAN') + + result = frequencies._get_rule_month('A-DEC') + assert(result == 'DEC') + result = frequencies._get_rule_month(offsets.YearEnd()) + assert(result == 'DEC') + + result = frequencies._get_rule_month('A-MAY') + assert(result == 'MAY') + result = frequencies._get_rule_month(offsets.YearEnd(month=5)) + assert(result == 'MAY') + class TestFrequencyCode(tm.TestCase): @@ -154,6 +193,23 @@ def test_freq_code(self): result = frequencies.get_freq_group(code) self.assertEqual(result, code // 1000 * 1000) + def test_freq_group(self): + self.assertEqual(frequencies.get_freq_group('A'), 1000) + self.assertEqual(frequencies.get_freq_group('3A'), 1000) + self.assertEqual(frequencies.get_freq_group('-1A'), 1000) + 
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000) + self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000) + self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000) + self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000) + self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000) + + self.assertEqual(frequencies.get_freq_group('W'), 4000) + self.assertEqual(frequencies.get_freq_group('W-MON'), 4000) + self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000) + self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000) + self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000) + self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000) + def test_get_to_timestamp_base(self): tsb = frequencies.get_to_timestamp_base diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index cdd9d036fcadc..c828d6d7effb6 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -59,6 +59,10 @@ def test_period_cons_quarterly(self): p = Period(stamp, freq=freq) self.assertEqual(p, exp) + stamp = exp.to_timestamp('3D', how='end') + p = Period(stamp, freq=freq) + self.assertEqual(p, exp) + def test_period_cons_annual(self): # bugs in scikits.timeseries for month in MONTHS: @@ -82,28 +86,109 @@ def test_period_cons_nat(self): p = Period('NaT', freq='M') self.assertEqual(p.ordinal, tslib.iNaT) self.assertEqual(p.freq, 'M') + self.assertEqual((p + 1).ordinal, tslib.iNaT) p = Period('nat', freq='W-SUN') self.assertEqual(p.ordinal, tslib.iNaT) self.assertEqual(p.freq, 'W-SUN') + self.assertEqual((p + 1).ordinal, tslib.iNaT) p = Period(tslib.iNaT, freq='D') self.assertEqual(p.ordinal, tslib.iNaT) self.assertEqual(p.freq, 'D') + self.assertEqual((p + 1).ordinal, tslib.iNaT) + + p = Period(tslib.iNaT, freq='3D') + self.assertEqual(p.ordinal, tslib.iNaT) + self.assertEqual(p.freq, offsets.Day(3)) + 
self.assertEqual(p.freqstr, '3D') + self.assertEqual((p + 1).ordinal, tslib.iNaT) self.assertRaises(ValueError, Period, 'NaT') + def test_period_cons_mult(self): + p1 = Period('2011-01', freq='3M') + p2 = Period('2011-01', freq='M') + self.assertEqual(p1.ordinal, p2.ordinal) + + self.assertEqual(p1.freq, offsets.MonthEnd(3)) + self.assertEqual(p1.freqstr, '3M') + + self.assertEqual(p2.freq, offsets.MonthEnd()) + self.assertEqual(p2.freqstr, 'M') + + result = p1 + 1 + self.assertEqual(result.ordinal, (p2 + 3).ordinal) + self.assertEqual(result.freq, p1.freq) + self.assertEqual(result.freqstr, '3M') + + result = p1 - 1 + self.assertEqual(result.ordinal, (p2 - 3).ordinal) + self.assertEqual(result.freq, p1.freq) + self.assertEqual(result.freqstr, '3M') + + msg = ('Frequency must be positive, because it' + ' represents span: -3M') + with tm.assertRaisesRegexp(ValueError, msg): + Period('2011-01', freq='-3M') + + msg = ('Frequency must be positive, because it' + ' represents span: 0M') + with tm.assertRaisesRegexp(ValueError, msg): + Period('2011-01', freq='0M') + def test_timestamp_tz_arg(self): + tm._skip_if_no_pytz() import pytz - p = Period('1/1/2005', freq='M').to_timestamp(tz='Europe/Brussels') - self.assertEqual(p.tz, - pytz.timezone('Europe/Brussels').normalize(p).tzinfo) + for case in ['Europe/Brussels', 'Asia/Tokyo', 'US/Pacific']: + p = Period('1/1/2005', freq='M').to_timestamp(tz=case) + exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case) + exp_zone = pytz.timezone(case).normalize(p) + + self.assertEqual(p, exp) + self.assertEqual(p.tz, exp_zone.tzinfo) + self.assertEqual(p.tz, exp.tz) + + p = Period('1/1/2005', freq='3H').to_timestamp(tz=case) + exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case) + exp_zone = pytz.timezone(case).normalize(p) + + self.assertEqual(p, exp) + self.assertEqual(p.tz, exp_zone.tzinfo) + self.assertEqual(p.tz, exp.tz) + + p = Period('1/1/2005', freq='A').to_timestamp(freq='A', tz=case) + exp = Timestamp('31/12/2005', 
tz='UTC').tz_convert(case) + exp_zone = pytz.timezone(case).normalize(p) + + self.assertEqual(p, exp) + self.assertEqual(p.tz, exp_zone.tzinfo) + self.assertEqual(p.tz, exp.tz) + + p = Period('1/1/2005', freq='A').to_timestamp(freq='3H', tz=case) + exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case) + exp_zone = pytz.timezone(case).normalize(p) + + self.assertEqual(p, exp) + self.assertEqual(p.tz, exp_zone.tzinfo) + self.assertEqual(p.tz, exp.tz) def test_timestamp_tz_arg_dateutil(self): from pandas.tslib import _dateutil_gettz as gettz from pandas.tslib import maybe_get_tz - p = Period('1/1/2005', freq='M').to_timestamp(tz=maybe_get_tz('dateutil/Europe/Brussels')) - self.assertEqual(p.tz, gettz('Europe/Brussels')) + for case in ['dateutil/Europe/Brussels', 'dateutil/Asia/Tokyo', + 'dateutil/US/Pacific']: + p = Period('1/1/2005', freq='M').to_timestamp(tz=maybe_get_tz(case)) + exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case) + self.assertEqual(p, exp) + self.assertEqual(p.tz, gettz(case.split('/', 1)[1])) + self.assertEqual(p.tz, exp.tz) + + p = Period('1/1/2005', freq='M').to_timestamp(freq='3H', tz=maybe_get_tz(case)) + exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case) + self.assertEqual(p, exp) + self.assertEqual(p.tz, gettz(case.split('/', 1)[1])) + self.assertEqual(p.tz, exp.tz) def test_timestamp_tz_arg_dateutil_from_string(self): from pandas.tslib import _dateutil_gettz as gettz @@ -117,6 +202,21 @@ def test_timestamp_nat_tz(self): t = Period('NaT', freq='M').to_timestamp(tz='Asia/Tokyo') self.assertTrue(t is tslib.NaT) + def test_timestamp_mult(self): + p = pd.Period('2011-01', freq='M') + self.assertEqual(p.to_timestamp(how='S'), pd.Timestamp('2011-01-01')) + self.assertEqual(p.to_timestamp(how='E'), pd.Timestamp('2011-01-31')) + + p = pd.Period('2011-01', freq='3M') + self.assertEqual(p.to_timestamp(how='S'), pd.Timestamp('2011-01-01')) + self.assertEqual(p.to_timestamp(how='E'), pd.Timestamp('2011-03-31')) + + def 
test_timestamp_nat_mult(self): + for freq in ['M', '3M']: + p = pd.Period('NaT', freq=freq) + self.assertTrue(p.to_timestamp(how='S') is pd.NaT) + self.assertTrue(p.to_timestamp(how='E') is pd.NaT) + def test_period_constructor(self): i1 = Period('1/1/2005', freq='M') i2 = Period('Jan 2005') @@ -252,9 +352,87 @@ def test_period_constructor(self): self.assertRaises(ValueError, Period, '2007-1-1', freq='X') + + def test_period_constructor_offsets(self): + self.assertEqual(Period('1/1/2005', freq=offsets.MonthEnd()), + Period('1/1/2005', freq='M')) + self.assertEqual(Period('2005', freq=offsets.YearEnd()), + Period('2005', freq='A')) + self.assertEqual(Period('2005', freq=offsets.MonthEnd()), + Period('2005', freq='M')) + self.assertEqual(Period('3/10/12', freq=offsets.BusinessDay()), + Period('3/10/12', freq='B')) + self.assertEqual(Period('3/10/12', freq=offsets.Day()), + Period('3/10/12', freq='D')) + + self.assertEqual(Period(year=2005, quarter=1, + freq=offsets.QuarterEnd(startingMonth=12)), + Period(year=2005, quarter=1, freq='Q')) + self.assertEqual(Period(year=2005, quarter=2, + freq=offsets.QuarterEnd(startingMonth=12)), + Period(year=2005, quarter=2, freq='Q')) + + self.assertEqual(Period(year=2005, month=3, day=1, freq=offsets.Day()), + Period(year=2005, month=3, day=1, freq='D')) + self.assertEqual(Period(year=2012, month=3, day=10, freq=offsets.BDay()), + Period(year=2012, month=3, day=10, freq='B')) + + expected = Period('2005-03-01', freq='3D') + self.assertEqual(Period(year=2005, month=3, day=1, freq=offsets.Day(3)), + expected) + self.assertEqual(Period(year=2005, month=3, day=1, freq='3D'), + expected) + + self.assertEqual(Period(year=2012, month=3, day=10, freq=offsets.BDay(3)), + Period(year=2012, month=3, day=10, freq='3B')) + + self.assertEqual(Period(200701, freq=offsets.MonthEnd()), + Period(200701, freq='M')) + + i1 = Period(ordinal=200701, freq=offsets.MonthEnd()) + i2 = Period(ordinal=200701, freq='M') + self.assertEqual(i1, i2) + 
self.assertEqual(i1.year, 18695) + self.assertEqual(i2.year, 18695) + + i1 = Period(datetime(2007, 1, 1), freq='M') + i2 = Period('200701', freq='M') + self.assertEqual(i1, i2) + + i1 = Period(date(2007, 1, 1), freq='M') + i2 = Period(datetime(2007, 1, 1), freq='M') + i3 = Period(np.datetime64('2007-01-01'), freq='M') + i4 = Period(np.datetime64('2007-01-01 00:00:00Z'), freq='M') + i5 = Period(np.datetime64('2007-01-01 00:00:00.000Z'), freq='M') + self.assertEqual(i1, i2) + self.assertEqual(i1, i3) + self.assertEqual(i1, i4) + self.assertEqual(i1, i5) + + i1 = Period('2007-01-01 09:00:00.001') + expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L') + self.assertEqual(i1, expected) + + expected = Period(np.datetime64('2007-01-01 09:00:00.001Z'), freq='L') + self.assertEqual(i1, expected) + + i1 = Period('2007-01-01 09:00:00.00101') + expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U') + self.assertEqual(i1, expected) + + expected = Period(np.datetime64('2007-01-01 09:00:00.00101Z'), + freq='U') + self.assertEqual(i1, expected) + + self.assertRaises(ValueError, Period, ordinal=200701) + + self.assertRaises(ValueError, Period, '2007-1-1', freq='X') + + def test_freq_str(self): i1 = Period('1982', freq='Min') - self.assertNotEqual(i1.freq[0], '1') + self.assertEqual(i1.freq, offsets.Minute()) + self.assertEqual(i1.freqstr, 'T') def test_repr(self): p = Period('Jan-2000') @@ -297,11 +475,14 @@ def test_to_timestamp(self): aliases = ['s', 'StarT', 'BEGIn'] for a in aliases: self.assertEqual(start_ts, p.to_timestamp('D', how=a)) + # freq with mult should not affect to the result + self.assertEqual(start_ts, p.to_timestamp('3D', how=a)) end_ts = p.to_timestamp(how='E') aliases = ['e', 'end', 'FINIsH'] for a in aliases: self.assertEqual(end_ts, p.to_timestamp('D', how=a)) + self.assertEqual(end_ts, p.to_timestamp('3D', how=a)) from_lst = ['A', 'Q', 'M', 'W', 'B', 'D', 'H', 'Min', 'S'] @@ -325,10 +506,15 @@ def _ex(p): result = p.to_timestamp('H', 
how='end') expected = datetime(1985, 12, 31, 23) self.assertEqual(result, expected) + result = p.to_timestamp('3H', how='end') + self.assertEqual(result, expected) result = p.to_timestamp('T', how='end') expected = datetime(1985, 12, 31, 23, 59) self.assertEqual(result, expected) + result = p.to_timestamp('2T', how='end') + self.assertEqual(result, expected) + result = p.to_timestamp(how='end') expected = datetime(1985, 12, 31) @@ -341,8 +527,10 @@ def _ex(p): self.assertEqual(result, expected) result = p.to_timestamp('S', how='start') self.assertEqual(result, expected) - - assertRaisesRegexp(ValueError, 'Only mult == 1', p.to_timestamp, '5t') + result = p.to_timestamp('3H', how='start') + self.assertEqual(result, expected) + result = p.to_timestamp('5S', how='start') + self.assertEqual(result, expected) p = Period('NaT', freq='W') self.assertTrue(p.to_timestamp() is tslib.NaT) @@ -354,9 +542,9 @@ def test_start_time(self): p = Period('2012', freq=f) self.assertEqual(p.start_time, xp) self.assertEqual(Period('2012', freq='B').start_time, - datetime(2012, 1, 2)) + datetime(2012, 1, 2)) self.assertEqual(Period('2012', freq='W').start_time, - datetime(2011, 12, 26)) + datetime(2011, 12, 26)) p = Period('NaT', freq='W') self.assertTrue(p.start_time is tslib.NaT) @@ -489,19 +677,20 @@ def test_properties_daily(self): def test_properties_hourly(self): # Test properties on Periods with hourly frequency. 
- h_date = Period(freq='H', year=2007, month=1, day=1, hour=0) - # - assert_equal(h_date.year, 2007) - assert_equal(h_date.quarter, 1) - assert_equal(h_date.month, 1) - assert_equal(h_date.day, 1) - assert_equal(h_date.weekday, 0) - assert_equal(h_date.dayofyear, 1) - assert_equal(h_date.hour, 0) - assert_equal(h_date.days_in_month, 31) - assert_equal(Period(freq='H', year=2012, month=2, day=1, - hour=0).days_in_month, 29) - # + h_date1 = Period(freq='H', year=2007, month=1, day=1, hour=0) + h_date2 = Period(freq='2H', year=2007, month=1, day=1, hour=0) + + for h_date in [h_date1, h_date2]: + assert_equal(h_date.year, 2007) + assert_equal(h_date.quarter, 1) + assert_equal(h_date.month, 1) + assert_equal(h_date.day, 1) + assert_equal(h_date.weekday, 0) + assert_equal(h_date.dayofyear, 1) + assert_equal(h_date.hour, 0) + assert_equal(h_date.days_in_month, 31) + assert_equal(Period(freq='H', year=2012, month=2, day=1, + hour=0).days_in_month, 29) def test_properties_minutely(self): # Test properties on Periods with minutely frequency. 
@@ -556,9 +745,15 @@ def test_pnow(self): exp = Period(dt, freq='D') self.assertEqual(val, exp) + val2 = period.pnow('2D') + exp2 = Period(dt, freq='2D') + self.assertEqual(val2, exp2) + self.assertEqual(val.ordinal, val2.ordinal) + self.assertEqual(val.ordinal, exp2.ordinal) + def test_constructor_corner(self): - self.assertRaises(ValueError, Period, year=2007, month=1, - freq='2M') + expected = Period('2007-01', freq='2M') + self.assertEqual(Period(year=2007, month=1, freq='2M'), expected) self.assertRaises(ValueError, Period, datetime.now()) self.assertRaises(ValueError, Period, datetime.now().date()) @@ -613,7 +808,13 @@ class TestFreqConversion(tm.TestCase): def test_asfreq_corner(self): val = Period(freq='A', year=2007) - self.assertRaises(ValueError, val.asfreq, '5t') + result1 = val.asfreq('5t') + result2 = val.asfreq('t') + expected = Period('2007-12-31 23:59', freq='t') + self.assertEqual(result1.ordinal, expected.ordinal) + self.assertEqual(result1.freqstr, '5T') + self.assertEqual(result2.ordinal, expected.ordinal) + self.assertEqual(result2.freqstr, 'T') def test_conv_annual(self): # frequency conversion tests: from Annual Frequency @@ -795,7 +996,6 @@ def test_conv_monthly(self): def test_conv_weekly(self): # frequency conversion tests: from Weekly Frequency - ival_W = Period(freq='W', year=2007, month=1, day=1) ival_WSUN = Period(freq='W', year=2007, month=1, day=7) @@ -1311,6 +1511,92 @@ def test_asfreq_nat(self): self.assertEqual(result.ordinal, tslib.iNaT) self.assertEqual(result.freq, 'M') + def test_asfreq_mult(self): + # normal freq to mult freq + p = Period(freq='A', year=2007) + # ordinal will not change + for freq in ['3A', offsets.YearEnd(3)]: + result = p.asfreq(freq) + expected = Period('2007', freq='3A') + + self.assertEqual(result, expected) + self.assertEqual(result.ordinal, expected.ordinal) + self.assertEqual(result.freq, expected.freq) + # ordinal will not change + for freq in ['3A', offsets.YearEnd(3)]: + result = p.asfreq(freq, 
how='S') + expected = Period('2007', freq='3A') + + self.assertEqual(result, expected) + self.assertEqual(result.ordinal, expected.ordinal) + self.assertEqual(result.freq, expected.freq) + + # mult freq to normal freq + p = Period(freq='3A', year=2007) + # ordinal will change because how=E is the default + for freq in ['A', offsets.YearEnd()]: + result = p.asfreq(freq) + expected = Period('2009', freq='A') + + self.assertEqual(result, expected) + self.assertEqual(result.ordinal, expected.ordinal) + self.assertEqual(result.freq, expected.freq) + # ordinal will not change + for freq in ['A', offsets.YearEnd()]: + result = p.asfreq(freq, how='S') + expected = Period('2007', freq='A') + + self.assertEqual(result, expected) + self.assertEqual(result.ordinal, expected.ordinal) + self.assertEqual(result.freq, expected.freq) + + p = Period(freq='A', year=2007) + for freq in ['2M', offsets.MonthEnd(2)]: + result = p.asfreq(freq) + expected = Period('2007-12', freq='2M') + + self.assertEqual(result, expected) + self.assertEqual(result.ordinal, expected.ordinal) + self.assertEqual(result.freq, expected.freq) + for freq in ['2M', offsets.MonthEnd(2)]: + result = p.asfreq(freq, how='S') + expected = Period('2007-01', freq='2M') + + self.assertEqual(result, expected) + self.assertEqual(result.ordinal, expected.ordinal) + self.assertEqual(result.freq, expected.freq) + + p = Period(freq='3A', year=2007) + for freq in ['2M', offsets.MonthEnd(2)]: + result = p.asfreq(freq) + expected = Period('2009-12', freq='2M') + + self.assertEqual(result, expected) + self.assertEqual(result.ordinal, expected.ordinal) + self.assertEqual(result.freq, expected.freq) + for freq in ['2M', offsets.MonthEnd(2)]: + result = p.asfreq(freq, how='S') + expected = Period('2007-01', freq='2M') + + self.assertEqual(result, expected) + self.assertEqual(result.ordinal, expected.ordinal) + self.assertEqual(result.freq, expected.freq) + + def test_asfreq_mult_nat(self): + # normal freq to mult freq + for p in 
[Period('NaT', freq='A'), Period('NaT', freq='3A'), + Period('NaT', freq='2M'), Period('NaT', freq='3D')]: + for freq in ['3A', offsets.YearEnd(3)]: + result = p.asfreq(freq) + expected = Period('NaT', freq='3A') + self.assertEqual(result.ordinal, pd.tslib.iNaT) + self.assertEqual(result.freq, expected.freq) + + result = p.asfreq(freq, how='S') + expected = Period('NaT', freq='3A') + self.assertEqual(result.ordinal, pd.tslib.iNaT) + self.assertEqual(result.freq, expected.freq) + class TestPeriodIndex(tm.TestCase): @@ -1352,9 +1638,8 @@ def test_constructor_field_arrays(self): expected = period_range('1990Q3', '2009Q2', freq='Q-DEC') self.assertTrue(index.equals(expected)) - self.assertRaises( - ValueError, PeriodIndex, year=years, quarter=quarters, - freq='2Q-DEC') + index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC') + tm.assert_numpy_array_equal(index.asi8, index2.asi8) index = PeriodIndex(year=years, quarter=quarters) self.assertTrue(index.equals(expected)) @@ -1422,6 +1707,18 @@ def test_constructor_fromarraylike(self): result = PeriodIndex(idx, freq='M') self.assertTrue(result.equals(idx)) + result = PeriodIndex(idx, freq=offsets.MonthEnd()) + self.assertTrue(result.equals(idx)) + self.assertTrue(result.freq, 'M') + + result = PeriodIndex(idx, freq='2M') + self.assertTrue(result.equals(idx)) + self.assertTrue(result.freq, '2M') + + result = PeriodIndex(idx, freq=offsets.MonthEnd(2)) + self.assertTrue(result.equals(idx)) + self.assertTrue(result.freq, '2M') + result = PeriodIndex(idx, freq='D') exp = idx.asfreq('D', 'e') self.assertTrue(result.equals(exp)) @@ -1455,6 +1752,49 @@ def test_constructor_year_and_quarter(self): p = PeriodIndex(lops) tm.assert_index_equal(p, idx) + def test_constructor_freq_mult(self): + # GH #7811 + for func in [PeriodIndex, period_range]: + # must be the same, but for sure... 
+ pidx = func(start='2014-01', freq='2M', periods=4) + expected = PeriodIndex(['2014-01', '2014-03', '2014-05', '2014-07'], freq='M') + tm.assert_index_equal(pidx, expected) + + pidx = func(start='2014-01-02', end='2014-01-15', freq='3D') + expected = PeriodIndex(['2014-01-02', '2014-01-05', '2014-01-08', '2014-01-11', + '2014-01-14'], freq='D') + tm.assert_index_equal(pidx, expected) + + pidx = func(end='2014-01-01 17:00', freq='4H', periods=3) + expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00', + '2014-01-01 17:00'], freq='4H') + tm.assert_index_equal(pidx, expected) + + msg = ('Frequency must be positive, because it' + ' represents span: -1M') + with tm.assertRaisesRegexp(ValueError, msg): + PeriodIndex(['2011-01'], freq='-1M') + + msg = ('Frequency must be positive, because it' + ' represents span: 0M') + with tm.assertRaisesRegexp(ValueError, msg): + PeriodIndex(['2011-01'], freq='0M') + + msg = ('Frequency must be positive, because it' + ' represents span: 0M') + with tm.assertRaisesRegexp(ValueError, msg): + period_range('2011-01', periods=3, freq='0M') + + def test_constructor_freq_mult_dti_compat(self): + import itertools + mults = [1, 2, 3, 4, 5] + freqs = ['A', 'M', 'D', 'T', 'S'] + for mult, freq in itertools.product(mults, freqs): + freqstr = str(mult) + freq + pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10) + expected = date_range(start='2014-04-01', freq=freqstr, periods=10).to_period(freq) + tm.assert_index_equal(pidx, expected) + def test_is_(self): create_index = lambda: PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') @@ -1563,6 +1903,13 @@ def test_slice_with_zero_step_raises(self): self.assertRaisesRegexp(ValueError, 'slice step cannot be zero', lambda: ts.ix[::0]) + def test_contains(self): + rng = period_range('2007-01', freq='M', periods=10) + + self.assertTrue(Period('2007-01', freq='M') in rng) + self.assertFalse(Period('2007-01', freq='D') in rng) + self.assertFalse(Period('2007-01', freq='2M') in 
rng) + def test_sub(self): rng = period_range('2007-01', periods=50) @@ -1614,8 +1961,6 @@ def _get_with_delta(delta, freq='A-DEC'): exp_index = _get_with_delta(delta) self.assertTrue(result.index.equals(exp_index)) - self.assertRaises(ValueError, index.to_timestamp, '5t') - index = PeriodIndex(freq='H', start='1/1/2001', end='1/2/2001') series = Series(1, index=index, name='foo') @@ -1651,7 +1996,7 @@ def test_to_timestamp_repr_is_code(self): for z in zs: self.assertEqual( eval(repr(z)), z) - def test_to_timestamp_period_nat(self): + def test_to_timestamp_pi_nat(self): # GH 7228 index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', name='idx') @@ -1665,6 +2010,25 @@ def test_to_timestamp_period_nat(self): self.assertTrue(result2.equals(index)) self.assertEqual(result2.name, 'idx') + result3 = result.to_period(freq='3M') + exp = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='3M', name='idx') + self.assert_index_equal(result3, exp) + self.assertEqual(result3.freqstr, '3M') + + msg = ('Frequency must be positive, because it' + ' represents span: -2A') + with tm.assertRaisesRegexp(ValueError, msg): + result.to_period(freq='-2A') + + def test_to_timestamp_pi_mult(self): + idx = PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='2M', name='idx') + result = idx.to_timestamp() + expected = DatetimeIndex(['2011-01-01', 'NaT', '2011-02-01'], name='idx') + self.assert_index_equal(result, expected) + result = idx.to_timestamp(how='E') + expected = DatetimeIndex(['2011-02-28', 'NaT', '2011-03-31'], name='idx') + self.assert_index_equal(result, expected) + def test_as_frame_columns(self): rng = period_range('1/1/2000', periods=5) df = DataFrame(randn(10, 5), columns=rng) @@ -1794,7 +2158,17 @@ def _get_with_delta(delta, freq='A-DEC'): # invalid axis assertRaisesRegexp(ValueError, 'axis', df.to_timestamp, axis=2) - assertRaisesRegexp(ValueError, 'Only mult == 1', df.to_timestamp, '5t', axis=1) + + result1 = df.to_timestamp('5t', axis=1) + result2 = df.to_timestamp('t', 
axis=1) + expected = pd.date_range('2001-01-01', '2009-01-01', freq='AS') + self.assertTrue(isinstance(result1.columns, DatetimeIndex)) + self.assertTrue(isinstance(result2.columns, DatetimeIndex)) + self.assert_numpy_array_equal(result1.columns.asi8, expected.asi8) + self.assert_numpy_array_equal(result2.columns.asi8, expected.asi8) + # PeriodIndex.to_timestamp always use 'infer' + self.assertEqual(result1.columns.freqstr, 'AS-JAN') + self.assertEqual(result2.columns.freqstr, 'AS-JAN') def test_index_duplicate_periods(self): # monotonic @@ -2007,7 +2381,13 @@ def test_asfreq(self): self.assertEqual(pi7.asfreq('Min', 'S'), pi6) self.assertRaises(ValueError, pi7.asfreq, 'T', 'foo') - self.assertRaises(ValueError, pi1.asfreq, '5t') + result1 = pi1.asfreq('3M') + result2 = pi1.asfreq('M') + expected = PeriodIndex(freq='M', start='2001-12', end='2001-12') + self.assert_numpy_array_equal(result1.asi8, expected.asi8) + self.assertEqual(result1.freqstr, '3M') + self.assert_numpy_array_equal(result2.asi8, expected.asi8) + self.assertEqual(result2.freqstr, 'M') def test_asfreq_nat(self): idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M') @@ -2015,6 +2395,22 @@ def test_asfreq_nat(self): expected = PeriodIndex(['2011Q1', '2011Q1', 'NaT', '2011Q2'], freq='Q') self.assertTrue(result.equals(expected)) + def test_asfreq_mult_pi(self): + pi = PeriodIndex(['2001-01', '2001-02', 'NaT', '2001-03'], freq='2M') + + for freq in ['D', '3D']: + result = pi.asfreq(freq) + exp = PeriodIndex(['2001-02-28', '2001-03-31', 'NaT', + '2001-04-30'], freq=freq) + self.assert_index_equal(result, exp) + self.assertEqual(result.freq, exp.freq) + + result = pi.asfreq(freq, how='S') + exp = PeriodIndex(['2001-01-01', '2001-02-01', 'NaT', + '2001-03-01'], freq=freq) + self.assert_index_equal(result, exp) + self.assertEqual(result.freq, exp.freq) + def test_period_index_length(self): pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') assert_equal(len(pi), 9) @@ -2120,12 
+2516,19 @@ def test_dti_to_period(self): dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') pi1 = dti.to_period() pi2 = dti.to_period(freq='D') + pi3 = dti.to_period(freq='3D') self.assertEqual(pi1[0], Period('Jan 2005', freq='M')) self.assertEqual(pi2[0], Period('1/31/2005', freq='D')) + self.assertEqual(pi3[0], Period('1/31/2005', freq='3D')) self.assertEqual(pi1[-1], Period('Nov 2005', freq='M')) self.assertEqual(pi2[-1], Period('11/30/2005', freq='D')) + self.assertEqual(pi3[-1], Period('11/30/2005', freq='3D')) + + tm.assert_index_equal(pi1, period_range('1/1/2005', '11/1/2005', freq='M')) + tm.assert_index_equal(pi2, period_range('1/1/2005', '11/1/2005', freq='M').asfreq('D')) + tm.assert_index_equal(pi3, period_range('1/1/2005', '11/1/2005', freq='M').asfreq('3D')) def test_pindex_slice_index(self): pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='M') @@ -2217,7 +2620,6 @@ def test_getitem_seconds(self): continue s = Series(np.random.rand(len(idx)), index=idx) - assert_series_equal(s['2013/01/01 10:00'], s[3600:3660]) assert_series_equal(s['2013/01/01 9H'], s[:3600]) for d in ['2013/01/01', '2013/01', '2013']: @@ -2318,35 +2720,35 @@ def test_to_period_monthish(self): prng = rng.to_period() self.assertEqual(prng.freq, 'M') - def test_no_multiples(self): - self.assertRaises(ValueError, period_range, '1989Q3', periods=10, - freq='2Q') - - self.assertRaises(ValueError, period_range, '1989', periods=10, - freq='2A') - self.assertRaises(ValueError, Period, '1989', freq='2A') - - # def test_pindex_multiples(self): - # pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='2M') - # self.assertEqual(pi[0], Period('1/1/10', '2M')) - # self.assertEqual(pi[1], Period('3/1/10', '2M')) - - # self.assertEqual(pi[0].asfreq('6M'), pi[2].asfreq('6M')) - # self.assertEqual(pi[0].asfreq('A'), pi[2].asfreq('A')) - - # self.assertEqual(pi[0].asfreq('M', how='S'), - # Period('Jan 2010', '1M')) - # self.assertEqual(pi[0].asfreq('M', how='E'), - # Period('Feb 
2010', '1M')) - # self.assertEqual(pi[1].asfreq('M', how='S'), - # Period('Mar 2010', '1M')) - - # i = Period('1/1/2010 12:05:18', '5S') - # self.assertEqual(i, Period('1/1/2010 12:05:15', '5S')) - - # i = Period('1/1/2010 12:05:18', '5S') - # self.assertEqual(i.asfreq('1S', how='E'), - # Period('1/1/2010 12:05:19', '1S')) + def test_multiples(self): + result1 = Period('1989', freq='2A') + result2 = Period('1989', freq='A') + self.assertEqual(result1.ordinal, result2.ordinal) + self.assertEqual(result1.freqstr, '2A-DEC') + self.assertEqual(result2.freqstr, 'A-DEC') + self.assertEqual(result1.freq, offsets.YearEnd(2)) + self.assertEqual(result2.freq, offsets.YearEnd()) + + self.assertEqual((result1 + 1).ordinal, result1.ordinal + 2) + self.assertEqual((result1 - 1).ordinal, result2.ordinal - 2) + + def test_pindex_multiples(self): + pi = PeriodIndex(start='1/1/11', end='12/31/11', freq='2M') + expected = PeriodIndex(['2011-01', '2011-03', '2011-05', '2011-07', + '2011-09', '2011-11'], freq='M') + tm.assert_index_equal(pi, expected) + self.assertEqual(pi.freq, offsets.MonthEnd(2)) + self.assertEqual(pi.freqstr, '2M') + + pi = period_range(start='1/1/11', end='12/31/11', freq='2M') + tm.assert_index_equal(pi, expected) + self.assertEqual(pi.freq, offsets.MonthEnd(2)) + self.assertEqual(pi.freqstr, '2M') + + pi = period_range(start='1/1/11', periods=6, freq='2M') + tm.assert_index_equal(pi, expected) + self.assertEqual(pi.freq, offsets.MonthEnd(2)) + self.assertEqual(pi.freqstr, '2M') def test_iteration(self): index = PeriodIndex(start='1/1/10', periods=4, freq='B') @@ -2412,7 +2814,8 @@ def test_align_series(self): # it works! 
for kind in ['inner', 'outer', 'left', 'right']: ts.align(ts[::2], join=kind) - with assertRaisesRegexp(ValueError, 'Only like-indexed'): + msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)" + with assertRaisesRegexp(ValueError, msg): ts + ts.asfreq('D', how="end") def test_align_frame(self): @@ -2444,6 +2847,9 @@ def test_union(self): self.assertRaises(ValueError, index.join, index.to_timestamp()) + index3 = period_range('1/1/2000', '1/20/2000', freq='2D') + self.assertRaises(ValueError, index.join, index3) + def test_intersection(self): index = period_range('1/1/2000', '1/20/2000', freq='D') @@ -2461,6 +2867,9 @@ def test_intersection(self): index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED') self.assertRaises(ValueError, index.intersection, index2) + index3 = period_range('1/1/2000', '1/20/2000', freq='2D') + self.assertRaises(ValueError, index.intersection, index3) + def test_fields(self): # year, month, day, hour, minute # second, weekofyear, week, dayofweek, weekday, dayofyear, quarter @@ -2614,7 +3023,8 @@ def test_pickle_freq(self): # GH2891 prng = period_range('1/1/2011', '1/1/2012', freq='M') new_prng = self.round_trip_pickle(prng) - self.assertEqual(new_prng.freq,'M') + self.assertEqual(new_prng.freq, offsets.MonthEnd()) + self.assertEqual(new_prng.freqstr, 'M') def test_slice_keep_name(self): idx = period_range('20010101', periods=10, freq='D', name='bob') @@ -2669,12 +3079,24 @@ def test_combine_first(self): tm.assert_series_equal(result, expected) def test_searchsorted(self): - pidx = pd.period_range('2014-01-01', periods=10, freq='D') - self.assertEqual( - pidx.searchsorted(pd.Period('2014-01-01', freq='D')), 0) - self.assertRaisesRegexp( - ValueError, 'Different period frequency: H', - lambda: pidx.searchsorted(pd.Period('2014-01-01', freq='H'))) + for freq in ['D', '2D']: + pidx = pd.PeriodIndex(['2014-01-01', '2014-01-02', '2014-01-03', + '2014-01-04', '2014-01-05'], freq=freq) + + p1 = pd.Period('2014-01-01', freq=freq) 
+ self.assertEqual(pidx.searchsorted(p1), 0) + + p2 = pd.Period('2014-01-04', freq=freq) + self.assertEqual(pidx.searchsorted(p2), 3) + + msg = "Input has different freq=H from PeriodIndex" + with self.assertRaisesRegexp(ValueError, msg): + pidx.searchsorted(pd.Period('2014-01-01', freq='H')) + + msg = "Input has different freq=5D from PeriodIndex" + with self.assertRaisesRegexp(ValueError, msg): + pidx.searchsorted(pd.Period('2014-01-01', freq='5D')) + def test_round_trip(self): @@ -2704,186 +3126,203 @@ def test_add(self): def test_add_offset(self): # freq is DateOffset - p = Period('2011', freq='A') - self.assertEqual(p + offsets.YearEnd(2), Period('2013', freq='A')) + for freq in ['A', '2A', '3A']: + p = Period('2011', freq=freq) + self.assertEqual(p + offsets.YearEnd(2), Period('2013', freq=freq)) - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(365, 'D'), timedelta(365)]: + with tm.assertRaises(ValueError): + p + o - p = Period('2011-03', freq='M') - self.assertEqual(p + offsets.MonthEnd(2), Period('2011-05', freq='M')) - self.assertEqual(p + offsets.MonthEnd(12), Period('2012-03', freq='M')) + for freq in ['M', '2M', '3M']: + p = Period('2011-03', freq=freq) + self.assertEqual(p + offsets.MonthEnd(2), Period('2011-05', freq=freq)) + self.assertEqual(p + offsets.MonthEnd(12), Period('2012-03', freq=freq)) - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(365, 'D'), timedelta(365)]: + with tm.assertRaises(ValueError): + p + o # freq is Tick - p = 
Period('2011-04-01', freq='D') - self.assertEqual(p + offsets.Day(5), Period('2011-04-06', freq='D')) - self.assertEqual(p + offsets.Hour(24), Period('2011-04-02', freq='D')) - self.assertEqual(p + np.timedelta64(2, 'D'), Period('2011-04-03', freq='D')) - self.assertEqual(p + np.timedelta64(3600 * 24, 's'), Period('2011-04-02', freq='D')) - self.assertEqual(p + timedelta(-2), Period('2011-03-30', freq='D')) - self.assertEqual(p + timedelta(hours=48), Period('2011-04-03', freq='D')) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(4, 'h'), timedelta(hours=23)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o - - p = Period('2011-04-01 09:00', freq='H') - self.assertEqual(p + offsets.Day(2), Period('2011-04-03 09:00', freq='H')) - self.assertEqual(p + offsets.Hour(3), Period('2011-04-01 12:00', freq='H')) - self.assertEqual(p + np.timedelta64(3, 'h'), Period('2011-04-01 12:00', freq='H')) - self.assertEqual(p + np.timedelta64(3600, 's'), Period('2011-04-01 10:00', freq='H')) - self.assertEqual(p + timedelta(minutes=120), Period('2011-04-01 11:00', freq='H')) - self.assertEqual(p + timedelta(days=4, minutes=180), Period('2011-04-05 12:00', freq='H')) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o + for freq in ['D', '2D', '3D']: + p = Period('2011-04-01', freq=freq) + self.assertEqual(p + offsets.Day(5), Period('2011-04-06', freq=freq)) + self.assertEqual(p + offsets.Hour(24), Period('2011-04-02', freq=freq)) + self.assertEqual(p + np.timedelta64(2, 'D'), Period('2011-04-03', freq=freq)) + self.assertEqual(p + np.timedelta64(3600 * 24, 's'), Period('2011-04-02', freq=freq)) + self.assertEqual(p + timedelta(-2), Period('2011-03-30', freq=freq)) + self.assertEqual(p + timedelta(hours=48), 
Period('2011-04-03', freq=freq)) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(4, 'h'), timedelta(hours=23)]: + with tm.assertRaises(ValueError): + p + o + + for freq in ['H', '2H', '3H']: + p = Period('2011-04-01 09:00', freq=freq) + self.assertEqual(p + offsets.Day(2), Period('2011-04-03 09:00', freq=freq)) + self.assertEqual(p + offsets.Hour(3), Period('2011-04-01 12:00', freq=freq)) + self.assertEqual(p + np.timedelta64(3, 'h'), Period('2011-04-01 12:00', freq=freq)) + self.assertEqual(p + np.timedelta64(3600, 's'), Period('2011-04-01 10:00', freq=freq)) + self.assertEqual(p + timedelta(minutes=120), Period('2011-04-01 11:00', freq=freq)) + self.assertEqual(p + timedelta(days=4, minutes=180), Period('2011-04-05 12:00', freq=freq)) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: + with tm.assertRaises(ValueError): + p + o def test_add_offset_nat(self): # freq is DateOffset - p = Period('NaT', freq='A') - for o in [offsets.YearEnd(2)]: - self.assertEqual((p + o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaises(ValueError): - p + o - - p = Period('NaT', freq='M') - for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]: - self.assertEqual((p + o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o + for freq in ['A', '2A', '3A']: + p = Period('NaT', freq=freq) + for o in [offsets.YearEnd(2)]: + self.assertEqual((p + o).ordinal, tslib.iNaT) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(365, 'D'), timedelta(365)]: + with tm.assertRaises(ValueError): + p + o + + for freq in ['M', '2M', '3M']: 
+ p = Period('NaT', freq=freq) + for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]: + self.assertEqual((p + o).ordinal, tslib.iNaT) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(365, 'D'), timedelta(365)]: + with tm.assertRaises(ValueError): + p + o # freq is Tick - p = Period('NaT', freq='D') - for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'), - np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]: - self.assertEqual((p + o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(4, 'h'), timedelta(hours=23)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o - - p = Period('NaT', freq='H') - for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'), - np.timedelta64(3600, 's'), timedelta(minutes=120), - timedelta(days=4, minutes=180)]: - self.assertEqual((p + o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o + for freq in ['D', '2D', '3D']: + p = Period('NaT', freq=freq) + for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'), + np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]: + self.assertEqual((p + o).ordinal, tslib.iNaT) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(4, 'h'), timedelta(hours=23)]: + with tm.assertRaises(ValueError): + p + o + + for freq in ['H', '2H', '3H']: + p = Period('NaT', freq=freq) + for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'), + np.timedelta64(3600, 's'), timedelta(minutes=120), + timedelta(days=4, minutes=180)]: + self.assertEqual((p + o).ordinal, tslib.iNaT) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + 
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: + with tm.assertRaises(ValueError): + p + o def test_sub_offset(self): # freq is DateOffset - p = Period('2011', freq='A') - self.assertEqual(p - offsets.YearEnd(2), Period('2009', freq='A')) + for freq in ['A', '2A', '3A']: + p = Period('2011', freq=freq) + self.assertEqual(p - offsets.YearEnd(2), Period('2009', freq=freq)) - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaises(ValueError): - p - o + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(365, 'D'), timedelta(365)]: + with tm.assertRaises(ValueError): + p - o - p = Period('2011-03', freq='M') - self.assertEqual(p - offsets.MonthEnd(2), Period('2011-01', freq='M')) - self.assertEqual(p - offsets.MonthEnd(12), Period('2010-03', freq='M')) + for freq in ['M', '2M', '3M']: + p = Period('2011-03', freq=freq) + self.assertEqual(p - offsets.MonthEnd(2), Period('2011-01', freq=freq)) + self.assertEqual(p - offsets.MonthEnd(12), Period('2010-03', freq=freq)) - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaises(ValueError): - p - o + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(365, 'D'), timedelta(365)]: + with tm.assertRaises(ValueError): + p - o # freq is Tick - p = Period('2011-04-01', freq='D') - self.assertEqual(p - offsets.Day(5), Period('2011-03-27', freq='D')) - self.assertEqual(p - offsets.Hour(24), Period('2011-03-31', freq='D')) - self.assertEqual(p - np.timedelta64(2, 'D'), Period('2011-03-30', freq='D')) - self.assertEqual(p - np.timedelta64(3600 * 24, 's'), Period('2011-03-31', freq='D')) - self.assertEqual(p - timedelta(-2), Period('2011-04-03', freq='D')) - self.assertEqual(p - timedelta(hours=48), Period('2011-03-30', freq='D')) - - for o in [offsets.YearBegin(2), 
offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(4, 'h'), timedelta(hours=23)]: - with tm.assertRaises(ValueError): - p - o - - p = Period('2011-04-01 09:00', freq='H') - self.assertEqual(p - offsets.Day(2), Period('2011-03-30 09:00', freq='H')) - self.assertEqual(p - offsets.Hour(3), Period('2011-04-01 06:00', freq='H')) - self.assertEqual(p - np.timedelta64(3, 'h'), Period('2011-04-01 06:00', freq='H')) - self.assertEqual(p - np.timedelta64(3600, 's'), Period('2011-04-01 08:00', freq='H')) - self.assertEqual(p - timedelta(minutes=120), Period('2011-04-01 07:00', freq='H')) - self.assertEqual(p - timedelta(days=4, minutes=180), Period('2011-03-28 06:00', freq='H')) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: - with tm.assertRaises(ValueError): - p - o + for freq in ['D', '2D', '3D']: + p = Period('2011-04-01', freq=freq) + self.assertEqual(p - offsets.Day(5), Period('2011-03-27', freq=freq)) + self.assertEqual(p - offsets.Hour(24), Period('2011-03-31', freq=freq)) + self.assertEqual(p - np.timedelta64(2, 'D'), Period('2011-03-30', freq=freq)) + self.assertEqual(p - np.timedelta64(3600 * 24, 's'), Period('2011-03-31', freq=freq)) + self.assertEqual(p - timedelta(-2), Period('2011-04-03', freq=freq)) + self.assertEqual(p - timedelta(hours=48), Period('2011-03-30', freq=freq)) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(4, 'h'), timedelta(hours=23)]: + with tm.assertRaises(ValueError): + p - o + + for freq in ['H', '2H', '3H']: + p = Period('2011-04-01 09:00', freq=freq) + self.assertEqual(p - offsets.Day(2), Period('2011-03-30 09:00', freq=freq)) + self.assertEqual(p - offsets.Hour(3), Period('2011-04-01 06:00', freq=freq)) + self.assertEqual(p - np.timedelta64(3, 'h'), Period('2011-04-01 06:00', freq=freq)) + self.assertEqual(p - np.timedelta64(3600, 's'), Period('2011-04-01 08:00', freq=freq)) + 
self.assertEqual(p - timedelta(minutes=120), Period('2011-04-01 07:00', freq=freq)) + self.assertEqual(p - timedelta(days=4, minutes=180), Period('2011-03-28 06:00', freq=freq)) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: + with tm.assertRaises(ValueError): + p - o def test_sub_offset_nat(self): # freq is DateOffset - p = Period('NaT', freq='A') - for o in [offsets.YearEnd(2)]: - self.assertEqual((p - o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaises(ValueError): - p - o - - p = Period('NaT', freq='M') - for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]: - self.assertEqual((p - o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaises(ValueError): - p - o + for freq in ['A', '2A', '3A']: + p = Period('NaT', freq=freq) + for o in [offsets.YearEnd(2)]: + self.assertEqual((p - o).ordinal, tslib.iNaT) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(365, 'D'), timedelta(365)]: + with tm.assertRaises(ValueError): + p - o + + for freq in ['M', '2M', '3M']: + p = Period('NaT', freq=freq) + for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]: + self.assertEqual((p - o).ordinal, tslib.iNaT) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(365, 'D'), timedelta(365)]: + with tm.assertRaises(ValueError): + p - o # freq is Tick - p = Period('NaT', freq='D') - for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'), - np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]: - self.assertEqual((p - o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(4, 'h'), timedelta(hours=23)]: 
- with tm.assertRaises(ValueError): - p - o - - p = Period('NaT', freq='H') - for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'), - np.timedelta64(3600, 's'), timedelta(minutes=120), - timedelta(days=4, minutes=180)]: - self.assertEqual((p - o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: - with tm.assertRaises(ValueError): - p - o + for freq in ['D', '2D', '3D']: + p = Period('NaT', freq=freq) + for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'), + np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]: + self.assertEqual((p - o).ordinal, tslib.iNaT) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(4, 'h'), timedelta(hours=23)]: + with tm.assertRaises(ValueError): + p - o + + for freq in ['H', '2H', '3H']: + p = Period('NaT', freq=freq) + for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'), + np.timedelta64(3600, 's'), timedelta(minutes=120), + timedelta(days=4, minutes=180)]: + self.assertEqual((p - o).ordinal, tslib.iNaT) + + for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), + np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: + with tm.assertRaises(ValueError): + p - o def test_nat_ops(self): - p = Period('NaT', freq='M') - self.assertEqual((p + 1).ordinal, tslib.iNaT) - self.assertEqual((p - 1).ordinal, tslib.iNaT) - self.assertEqual((p - Period('2011-01', freq='M')).ordinal, tslib.iNaT) - self.assertEqual((Period('2011-01', freq='M') - p).ordinal, tslib.iNaT) + for freq in ['M', '2M', '3M']: + p = Period('NaT', freq=freq) + self.assertEqual((p + 1).ordinal, tslib.iNaT) + self.assertEqual((p - 1).ordinal, tslib.iNaT) + self.assertEqual((p - Period('2011-01', freq=freq)).ordinal, tslib.iNaT) + self.assertEqual((Period('2011-01', freq=freq) - p).ordinal, tslib.iNaT) def test_pi_ops_nat(self): idx = 
PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx') @@ -3042,27 +3481,112 @@ def test_period_nat_comp(self): self.assertEqual(left <= right, False) self.assertEqual(left >= right, False) - def test_pi_nat_comp(self): - idx1 = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-05'], freq='M') + def test_pi_pi_comp(self): + + for freq in ['M', '2M', '3M']: + base = PeriodIndex(['2011-01', '2011-02', + '2011-03', '2011-04'], freq=freq) + p = Period('2011-02', freq=freq) + + exp = np.array([False, True, False, False]) + self.assert_numpy_array_equal(base == p, exp) + + exp = np.array([True, False, True, True]) + self.assert_numpy_array_equal(base != p, exp) + + exp = np.array([False, False, True, True]) + self.assert_numpy_array_equal(base > p, exp) + + exp = np.array([True, False, False, False]) + self.assert_numpy_array_equal(base < p, exp) + + exp = np.array([False, True, True, True]) + self.assert_numpy_array_equal(base >= p, exp) - result = idx1 > Period('2011-02', freq='M') - self.assert_numpy_array_equal(result, np.array([False, False, False, True])) + exp = np.array([True, True, False, False]) + self.assert_numpy_array_equal(base <= p, exp) - result = idx1 == Period('NaT', freq='M') - self.assert_numpy_array_equal(result, np.array([False, False, False, False])) + idx = PeriodIndex(['2011-02', '2011-01', '2011-03', '2011-05'], freq=freq) - result = idx1 != Period('NaT', freq='M') - self.assert_numpy_array_equal(result, np.array([True, True, True, True])) + exp = np.array([False, False, True, False]) + self.assert_numpy_array_equal(base == idx, exp) - idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq='M') - result = idx1 < idx2 - self.assert_numpy_array_equal(result, np.array([True, False, False, False])) + exp = np.array([True, True, False, True]) + self.assert_numpy_array_equal(base != idx, exp) - result = idx1 == idx1 - self.assert_numpy_array_equal(result, np.array([True, True, False, True])) + exp = np.array([False, True, 
False, False]) + self.assert_numpy_array_equal(base > idx, exp) - result = idx1 != idx1 - self.assert_numpy_array_equal(result, np.array([False, False, True, False])) + exp = np.array([True, False, False, True]) + self.assert_numpy_array_equal(base < idx, exp) + + exp = np.array([False, True, True, False]) + self.assert_numpy_array_equal(base >= idx, exp) + + exp = np.array([True, False, True, True]) + self.assert_numpy_array_equal(base <= idx, exp) + + # different base freq + msg = "Input has different freq=A-DEC from PeriodIndex" + with tm.assertRaisesRegexp(ValueError, msg): + base <= Period('2011', freq='A') + + with tm.assertRaisesRegexp(ValueError, msg): + idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='A') + base <= idx + + # different mult + msg = "Input has different freq=4M from PeriodIndex" + with tm.assertRaisesRegexp(ValueError, msg): + base <= Period('2011', freq='4M') + + with tm.assertRaisesRegexp(ValueError, msg): + idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='4M') + base <= idx + + def test_pi_nat_comp(self): + for freq in ['M', '2M', '3M']: + idx1 = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-05'], freq=freq) + + result = idx1 > Period('2011-02', freq=freq) + exp = np.array([False, False, False, True]) + self.assert_numpy_array_equal(result, exp) + + result = idx1 == Period('NaT', freq=freq) + exp = np.array([False, False, False, False]) + self.assert_numpy_array_equal(result, exp) + + result = idx1 != Period('NaT', freq=freq) + exp = np.array([True, True, True, True]) + self.assert_numpy_array_equal(result, exp) + + idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq=freq) + result = idx1 < idx2 + exp = np.array([True, False, False, False]) + self.assert_numpy_array_equal(result, exp) + + result = idx1 == idx2 + exp = np.array([False, False, False, False]) + self.assert_numpy_array_equal(result, exp) + + result = idx1 != idx2 + exp = np.array([True, True, True, True]) + 
self.assert_numpy_array_equal(result, exp) + + result = idx1 == idx1 + exp = np.array([True, True, False, True]) + self.assert_numpy_array_equal(result, exp) + + result = idx1 != idx1 + exp = np.array([False, False, True, False]) + self.assert_numpy_array_equal(result, exp) + + diff = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq='4M') + msg = "Input has different freq=4M from PeriodIndex" + with tm.assertRaisesRegexp(ValueError, msg): + idx1 > diff + with tm.assertRaisesRegexp(ValueError, msg): + idx1 == diff if __name__ == '__main__': diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index 08a4056c1fce2..d9b31c0a1d620 100644 --- a/pandas/tseries/tests/test_plotting.py +++ b/pandas/tseries/tests/test_plotting.py @@ -9,7 +9,7 @@ from pandas import Index, Series, DataFrame from pandas.tseries.index import date_range, bdate_range -from pandas.tseries.offsets import DateOffset +from pandas.tseries.offsets import DateOffset, Week from pandas.tseries.period import period_range, Period, PeriodIndex from pandas.tseries.resample import DatetimeIndex @@ -758,7 +758,7 @@ def test_to_weekly_resampling(self): high.plot() ax = low.plot() for l in ax.get_lines(): - self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq) # tsplot from pandas.tseries.plotting import tsplot @@ -767,7 +767,7 @@ def test_to_weekly_resampling(self): tsplot(high, plt.Axes.plot) lines = tsplot(low, plt.Axes.plot) for l in lines: - self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq) @slow def test_from_weekly_resampling(self): @@ -782,7 +782,7 @@ def test_from_weekly_resampling(self): expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, 1553, 1558, 1562]) for l in ax.get_lines(): - self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + 
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq) xdata = l.get_xdata(orig=False) if len(xdata) == 12: # idxl lines self.assert_numpy_array_equal(xdata, expected_l) @@ -796,9 +796,8 @@ def test_from_weekly_resampling(self): tsplot(low, plt.Axes.plot) lines = tsplot(high, plt.Axes.plot) - for l in lines: - self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq) xdata = l.get_xdata(orig=False) if len(xdata) == 12: # idxl lines self.assert_numpy_array_equal(xdata, expected_l) @@ -825,7 +824,7 @@ def test_from_resampling_area_line_mixed(self): expected_y = np.zeros(len(expected_x)) for i in range(3): l = ax.lines[i] - self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + self.assertEqual(PeriodIndex(l.get_xdata()).freq, idxh.freq) self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x) # check stacked values are correct expected_y += low[i].values @@ -836,7 +835,7 @@ def test_from_resampling_area_line_mixed(self): expected_y = np.zeros(len(expected_x)) for i in range(3): l = ax.lines[3 + i] - self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq) self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x) expected_y += high[i].values self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y) @@ -851,7 +850,7 @@ def test_from_resampling_area_line_mixed(self): expected_y = np.zeros(len(expected_x)) for i in range(3): l = ax.lines[i] - self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq) self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x) expected_y += high[i].values self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y) @@ -862,7 +861,7 @@ def test_from_resampling_area_line_mixed(self): expected_y = np.zeros(len(expected_x)) for i in 
range(3): l = ax.lines[3 + i] - self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq) self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x) expected_y += low[i].values self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y) diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index efd1ff9ba34fd..521679f21dc93 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -449,6 +449,10 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): if not isinstance(arg, compat.string_types): return arg + from pandas.tseries.offsets import DateOffset + if isinstance(freq, DateOffset): + freq = freq.rule_code + if dayfirst is None: dayfirst = get_option("display.date_dayfirst") if yearfirst is None: diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 77ac362181a2b..a914eb992d88f 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1799,6 +1799,19 @@ _MONTH_ALIASES = dict((k + 1, v) for k, v in enumerate(_MONTHS)) cpdef object _get_rule_month(object source, object default='DEC'): + """ + Return starting month of given freq, default is December. + + Example + ------- + >>> _get_rule_month('D') + 'DEC' + + >>> _get_rule_month('A-JAN') + 'JAN' + """ + if hasattr(source, 'freqstr'): + source = source.freqstr source = source.upper() if '-' not in source: return default
Closes #7811. - [x] Change `Period.freq` and `PeriodIndex.freq` to store offsets. - [x] Add `freqstr` to `Period`, `PeriodIndex` can use `DatetimeIndexOpsMixin`'s logic - [x] Logic and tests for pickles taken in prev versions - [x] Perform shift/arithmetic considering freq's mult. - [x] Test all offsets has accessible `n` proeprties (#10350) - [x] Explicit tests for `asfreq` and `to_timestamp` using freq with mult. - [x] Ops with different base freq must be prohibited. Change freq comparison to base comparison. - [x] Fix partial string slicing bug (some tests are commented out because of this) - [x] Fix order bug (#10295, removing temp logic for `PeriodIndex`) - ~~Decide a policy for legacy freq aliases, like "WK"~~ (handled in #10878) - [x] Update doc.
https://api.github.com/repos/pandas-dev/pandas/pulls/7832
2014-07-24T13:47:39Z
2015-09-03T14:04:18Z
2015-09-03T14:04:18Z
2015-09-03T14:14:34Z
CI: fix typos in readme
diff --git a/ci/README.txt b/ci/README.txt index f69fc832fde85..bb71dc25d6093 100644 --- a/ci/README.txt +++ b/ci/README.txt @@ -1,15 +1,15 @@ -Travis is a ci service that's well-integrated with github. -The following ypes of breakage should be detected -by travis builds: +Travis is a ci service that's well-integrated with GitHub. +The following types of breakage should be detected +by Travis builds: -1) Failing tests on any supported version of python. +1) Failing tests on any supported version of Python. 2) Pandas should install and the tests should run if no optional deps are installed. That also means tests which rely on optional deps need to raise SkipTest() if the dep is missing. 3) unicode related fails when running under exotic locales. We tried running the vbench suite for a while, but with varying load -on travis machines, that wasn't useful. +on Travis machines, that wasn't useful. Travis currently (4/2013) has a 5-job concurrency limit. Exceeding it basically doubles the total runtime for a commit through travis, and
This readme file had some problems with it. I cleaned it up a bit.
https://api.github.com/repos/pandas-dev/pandas/pulls/7831
2014-07-24T13:33:18Z
2014-07-24T17:28:15Z
2014-07-24T17:28:15Z
2014-07-24T17:28:19Z
BUG: Bug in passing a DatetimeIndex with a timezone that was not being retained in Frame construction (GH7822)
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 5e0af498557f2..e8daf41764a70 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -58,6 +58,27 @@ API changes rolling_min(s, window=10, min_periods=5) +- Bug in passing a ``DatetimeIndex`` with a timezone that was not being retained in DataFrame construction from a dict (:issue:`7822`) + + In prior versions this would drop the timezone. + + .. ipython:: python + + i = date_range('1/1/2011', periods=3, freq='10s', tz = 'US/Eastern') + i + df = DataFrame( {'a' : i } ) + df + df.dtypes + + This behavior is unchanged. + + .. ipython:: python + + df = DataFrame( ) + df['a'] = i + df + df.dtypes + .. _whatsnew_0150.cat: Categoricals in Series/DataFrame diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7b005867a404f..636dedfbeb7b7 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2146,19 +2146,13 @@ def reindexer(value): value = value.copy() elif (isinstance(value, Index) or _is_sequence(value)): - if len(value) != len(self.index): - raise ValueError('Length of values does not match length of ' - 'index') - + from pandas.core.series import _sanitize_index + value = _sanitize_index(value, self.index, copy=False) if not isinstance(value, (np.ndarray, Index)): if isinstance(value, list) and len(value) > 0: value = com._possibly_convert_platform(value) else: value = com._asarray_tuplesafe(value) - elif isinstance(value, PeriodIndex): - value = value.asobject - elif isinstance(value, DatetimeIndex): - value = value._to_embed(keep_tz=True).copy() elif value.ndim == 2: value = value.copy().T else: diff --git a/pandas/core/series.py b/pandas/core/series.py index 9abc8f22009b3..502c01ce6d1d1 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2431,8 +2431,26 @@ def remove_na(series): return series[notnull(_values_from_object(series))] +def _sanitize_index(data, index, copy=False): + """ sanitize an index type to return an ndarray of the underlying, pass thru 
a non-Index """ + + if len(data) != len(index): + raise ValueError('Length of values does not match length of ' + 'index') + + if isinstance(data, PeriodIndex): + data = data.asobject + elif isinstance(data, DatetimeIndex): + data = data._to_embed(keep_tz=True) + if copy: + data = data.copy() + + return data + def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False): + """ sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified """ + if dtype is not None: dtype = np.dtype(dtype) @@ -2482,11 +2500,13 @@ def _try_cast(arr, take_fast_path): raise TypeError('Cannot cast datetime64 to %s' % dtype) else: subarr = _try_cast(data, True) - else: + elif isinstance(data, Index): # don't coerce Index types # e.g. indexes can have different conversions (so don't fast path them) # GH 6140 - subarr = _try_cast(data, not isinstance(data, Index)) + subarr = _sanitize_index(data, index, copy=True) + else: + subarr = _try_cast(data, True) if copy: subarr = data.copy() diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index c4783bc49f0ce..0dd729d58f174 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -3599,6 +3599,23 @@ def test_constructor_with_datetimes(self): self.assertEqual(df.iat[0,0],dt) assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') })) + # GH 7822 + # preserver an index with a tz on dict construction + i = date_range('1/1/2011', periods=5, freq='10s', tz = 'US/Eastern') + + expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True) }) + df = DataFrame() + df['a'] = i + assert_frame_equal(df, expected) + df = DataFrame( {'a' : i } ) + assert_frame_equal(df, expected) + + # multiples + i_no_tz = date_range('1/1/2011', periods=5, freq='10s') + df = DataFrame( {'a' : i, 'b' : i_no_tz } ) + expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True), 'b': i_no_tz }) + assert_frame_equal(df, expected) + def 
test_constructor_for_list_with_dtypes(self): intname = np.dtype(np.int_).name floatname = np.dtype(np.float_).name diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index 531724cdb6837..21f915cb50e21 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -598,10 +598,13 @@ def test_to_datetime_tzlocal(self): def test_frame_no_datetime64_dtype(self): + # after 7822 + # these retain the timezones on dict construction + dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') dr_tz = dr.tz_localize(self.tzstr('US/Eastern')) e = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr) - self.assertEqual(e['B'].dtype, 'M8[ns]') + self.assertEqual(e['B'].dtype, 'O') # GH 2810 (with timezones) datetimes_naive = [ ts.to_pydatetime() for ts in dr ] @@ -610,7 +613,7 @@ def test_frame_no_datetime64_dtype(self): 'datetimes_naive': datetimes_naive, 'datetimes_with_tz' : datetimes_with_tz }) result = df.get_dtype_counts() - expected = Series({ 'datetime64[ns]' : 3, 'object' : 1 }) + expected = Series({ 'datetime64[ns]' : 2, 'object' : 2 }) tm.assert_series_equal(result, expected) def test_hongkong_tz_convert(self):
closes #7822
https://api.github.com/repos/pandas-dev/pandas/pulls/7823
2014-07-23T15:45:55Z
2014-07-23T17:01:23Z
2014-07-23T17:01:23Z
2014-07-23T17:01:23Z
BUG: Fixed failure in StataReader when reading variable labels in 117
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 06c93541a7783..2322af4752e2e 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -212,7 +212,7 @@ Bug Fixes - Bug in ``DataFrame.plot`` with ``subplots=True`` may draw unnecessary minor xticks and yticks (:issue:`7801`) - +- Bug in ``StataReader`` which did not read variable labels in 117 files due to difference between Stata documentation and implementation (:issue:`7816`) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 48a5f5ee6c994..3458a95ac096d 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -520,8 +520,15 @@ def _read_header(self): self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9 seek_value_label_names = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19 - seek_variable_labels = struct.unpack( - self.byteorder + 'q', self.path_or_buf.read(8))[0] + 17 + # Stata 117 data files do not follow the described format. This is + # a work around that uses the previous label, 33 bytes for each + # variable, 20 for the closing tag and 17 for the opening tag + self.path_or_buf.read(8) # <variable_lables>, throw away + seek_variable_labels = seek_value_label_names + (33*self.nvar) + 20 + 17 + # Below is the original, correct code (per Stata sta format doc, + # although this is not followed in actual 117 dtas) + #seek_variable_labels = struct.unpack( + # self.byteorder + 'q', self.path_or_buf.read(8))[0] + 17 self.path_or_buf.read(8) # <characteristics> self.data_location = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6 diff --git a/pandas/io/tests/data/stata7_115.dta b/pandas/io/tests/data/stata7_115.dta new file mode 100644 index 0000000000000..133713b201ba8 Binary files /dev/null and b/pandas/io/tests/data/stata7_115.dta differ diff --git a/pandas/io/tests/data/stata7_117.dta b/pandas/io/tests/data/stata7_117.dta new file mode 100644 index 0000000000000..c001478fc902d Binary files /dev/null and 
b/pandas/io/tests/data/stata7_117.dta differ diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index 435226bc4313f..5271604235922 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -68,6 +68,9 @@ def setUp(self): self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta') self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta') + self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta') + self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta') + def read_dta(self, file): return read_stata(file, convert_dates=True) @@ -199,7 +202,7 @@ def test_read_dta4(self): 'labeled_with_missings', 'float_labelled']) # these are all categoricals - expected = pd.concat([ Series(pd.Categorical(value)) for col, value in expected.iteritems() ],axis=1) + expected = pd.concat([ Series(pd.Categorical(value)) for col, value in compat.iteritems(expected)],axis=1) tm.assert_frame_equal(parsed_113, expected) tm.assert_frame_equal(parsed_114, expected) @@ -551,6 +554,18 @@ def test_bool_uint(self): written_and_read_again = written_and_read_again.set_index('index') tm.assert_frame_equal(written_and_read_again, expected) + def test_variable_labels(self): + sr_115 = StataReader(self.dta16_115).variable_labels() + sr_117 = StataReader(self.dta16_117).variable_labels() + keys = ('var1', 'var2', 'var3') + labels = ('label1', 'label2', 'label3') + for k,v in compat.iteritems(sr_115): + self.assertTrue(k in sr_117) + self.assertTrue(v == sr_117[k]) + self.assertTrue(k in keys) + self.assertTrue(v in labels) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
Stata's implementation does not match the online dta file format description. The solution used here is to directly compute the offset rather than reading it from the dta file. If Stata fixes their implementation, the original code can be restored. closes #7816
https://api.github.com/repos/pandas-dev/pandas/pulls/7818
2014-07-22T17:24:49Z
2014-07-23T13:40:44Z
2014-07-23T13:40:44Z
2014-08-20T15:32:51Z
TST: add tests for GH 6572
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 1614261542733..b6761426edc5d 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2181,6 +2181,29 @@ def test_constructor_coverage(self): end='2011-01-01', freq='B') self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D') + def test_constructor_datetime64_tzformat(self): + # GH 6572 + tm._skip_if_no_pytz() + tm._skip_if_no_dateutil() + from dateutil.tz import tzoffset + for freq in ['AS', 'W-SUN']: + idx = date_range('2013-01-01T00:00:00-05:00', '2016-01-01T23:59:59-05:00', freq=freq) + expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', + freq=freq, tz=tzoffset(None, -18000)) + tm.assert_index_equal(idx, expected) + # Unable to use `US/Eastern` because of DST + expected_i8 = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', + freq=freq, tz='America/Lima') + self.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) + + idx = date_range('2013-01-01T00:00:00+09:00', '2016-01-01T23:59:59+09:00', freq=freq) + expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', + freq=freq, tz=tzoffset(None, 32400)) + tm.assert_index_equal(idx, expected) + expected_i8 = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', + freq=freq, tz='Asia/Tokyo') + self.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) + def test_constructor_name(self): idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A', name='TEST')
Closes #6572. It seems to be fixed by #7465 as a side effect.
https://api.github.com/repos/pandas-dev/pandas/pulls/7810
2014-07-20T14:52:20Z
2014-07-21T11:43:55Z
2014-07-21T11:43:55Z
2014-07-23T11:08:16Z
ENH/CLN: add HistPlot class inheriting MPLPlot
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index e842b73664e6c..06dba0979c7eb 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -168,6 +168,8 @@ previously results in ``Exception`` or ``TypeError`` (:issue:`7812`) - ``Timestamp.__repr__`` displays ``dateutil.tz.tzoffset`` info (:issue:`7907`) +- Histogram from ``DataFrame.plot`` with ``kind='hist'`` (:issue:`7809`), See :ref:`the docs<visualization.hist>`. + .. _whatsnew_0150.dt: .dt accessor diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 69e04483cb47d..40b5d7c1599c1 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -123,6 +123,7 @@ a handful of values for plots other than the default Line plot. These include: * :ref:`'bar' <visualization.barplot>` or :ref:`'barh' <visualization.barplot>` for bar plots +* :ref:`'hist' <visualization.hist>` for histogram * :ref:`'kde' <visualization.kde>` or ``'density'`` for density plots * :ref:`'area' <visualization.area_plot>` for area plots * :ref:`'scatter' <visualization.scatter_matrix>` for scatter plots @@ -205,6 +206,46 @@ To get horizontal bar plots, pass ``kind='barh'``: Histograms ~~~~~~~~~~ + +.. versionadded:: 0.15.0 + +Histogram can be drawn specifying ``kind='hist'``. + +.. ipython:: python + + df4 = DataFrame({'a': randn(1000) + 1, 'b': randn(1000), + 'c': randn(1000) - 1}, columns=['a', 'b', 'c']) + + plt.figure(); + + @savefig hist_new.png + df4.plot(kind='hist', alpha=0.5) + +Histogram can be stacked by ``stacked=True``. Bin size can be changed by ``bins`` keyword. + +.. ipython:: python + + plt.figure(); + + @savefig hist_new_stacked.png + df4.plot(kind='hist', stacked=True, bins=20) + +You can pass other keywords supported by matplotlib ``hist``. For example, horizontal and cumulative histgram can be drawn by ``orientation='horizontal'`` and ``cumulative='True'``. + +.. 
ipython:: python + + plt.figure(); + + @savefig hist_new_kwargs.png + df4['a'].plot(kind='hist', orientation='horizontal', cumulative=True) + + +See the :meth:`hist <matplotlib.axes.Axes.hist>` method and the +`matplotlib hist documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist>`__ for more. + + +The previous interface ``DataFrame.hist`` to plot histogram still can be used. + .. ipython:: python plt.figure(); diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 8dbcb8c542fb3..b3a92263370e8 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -452,7 +452,7 @@ def test_plot(self): _check_plot_works(self.ts.plot, kind='area', stacked=False) _check_plot_works(self.iseries.plot) - for kind in ['line', 'bar', 'barh', 'kde']: + for kind in ['line', 'bar', 'barh', 'kde', 'hist']: if not _ok_for_gaussian_kde(kind): continue _check_plot_works(self.series[:5].plot, kind=kind) @@ -616,7 +616,13 @@ def test_pie_series(self): self._check_text_labels(ax.texts, series.index) @slow - def test_hist(self): + def test_hist_df_kwargs(self): + df = DataFrame(np.random.randn(10, 2)) + ax = df.plot(kind='hist', bins=5) + self.assertEqual(len(ax.patches), 10) + + @slow + def test_hist_legacy(self): _check_plot_works(self.ts.hist) _check_plot_works(self.ts.hist, grid=False) _check_plot_works(self.ts.hist, figsize=(8, 10)) @@ -637,7 +643,7 @@ def test_hist(self): self.ts.hist(by=self.ts.index, figure=fig) @slow - def test_hist_bins(self): + def test_hist_bins_legacy(self): df = DataFrame(np.random.randn(10, 2)) ax = df.hist(bins=2)[0][0] self.assertEqual(len(ax.patches), 2) @@ -701,13 +707,25 @@ def test_plot_fails_when_ax_differs_from_figure(self): self.ts.hist(ax=ax1, figure=fig2) @slow - def test_kde(self): + def test_hist_kde(self): + ax = self.ts.plot(kind='hist', logy=True) + self._check_ax_scales(ax, yaxis='log') + xlabels = ax.get_xticklabels() + # ticks are values, thus ticklabels are blank + 
self._check_text_labels(xlabels, [''] * len(xlabels)) + ylabels = ax.get_yticklabels() + self._check_text_labels(ylabels, [''] * len(ylabels)) + tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() _check_plot_works(self.ts.plot, kind='kde') _check_plot_works(self.ts.plot, kind='density') ax = self.ts.plot(kind='kde', logy=True) self._check_ax_scales(ax, yaxis='log') + xlabels = ax.get_xticklabels() + self._check_text_labels(xlabels, [''] * len(xlabels)) + ylabels = ax.get_yticklabels() + self._check_text_labels(ylabels, [''] * len(ylabels)) @slow def test_kde_kwargs(self): @@ -718,9 +736,29 @@ def test_kde_kwargs(self): _check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20)) ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20)) self._check_ax_scales(ax, yaxis='log') + self._check_text_labels(ax.yaxis.get_label(), 'Density') @slow - def test_kde_color(self): + def test_hist_kwargs(self): + ax = self.ts.plot(kind='hist', bins=5) + self.assertEqual(len(ax.patches), 5) + self._check_text_labels(ax.yaxis.get_label(), 'Degree') + tm.close() + + ax = self.ts.plot(kind='hist', orientation='horizontal') + self._check_text_labels(ax.xaxis.get_label(), 'Degree') + tm.close() + + ax = self.ts.plot(kind='hist', align='left', stacked=True) + tm.close() + + @slow + def test_hist_kde_color(self): + ax = self.ts.plot(kind='hist', logy=True, bins=10, color='b') + self._check_ax_scales(ax, yaxis='log') + self.assertEqual(len(ax.patches), 10) + self._check_colors(ax.patches, facecolors=['b'] * 10) + tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() ax = self.ts.plot(kind='kde', logy=True, color='r') @@ -1611,7 +1649,7 @@ def test_boxplot_return_type(self): self._check_box_return_type(result, 'both') @slow - def test_kde(self): + def test_kde_df(self): tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() df = DataFrame(randn(100, 4)) @@ -1630,7 +1668,122 @@ def test_kde(self): self._check_ax_scales(axes, 
yaxis='log') @slow - def test_hist(self): + def test_hist_df(self): + df = DataFrame(randn(100, 4)) + series = df[0] + + ax = _check_plot_works(df.plot, kind='hist') + expected = [com.pprint_thing(c) for c in df.columns] + self._check_legend_labels(ax, labels=expected) + + axes = _check_plot_works(df.plot, kind='hist', subplots=True, logy=True) + self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) + self._check_ax_scales(axes, yaxis='log') + + axes = series.plot(kind='hist', rot=40) + self._check_ticks_props(axes, xrot=40, yrot=0) + tm.close() + + ax = series.plot(kind='hist', normed=True, cumulative=True, bins=4) + # height of last bin (index 5) must be 1.0 + self.assertAlmostEqual(ax.get_children()[5].get_height(), 1.0) + tm.close() + + ax = series.plot(kind='hist', cumulative=True, bins=4) + self.assertAlmostEqual(ax.get_children()[5].get_height(), 100.0) + tm.close() + + # if horizontal, yticklabels are rotated + axes = df.plot(kind='hist', rot=50, fontsize=8, orientation='horizontal') + self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8) + + def _check_box_coord(self, patches, expected_y=None, expected_h=None, + expected_x=None, expected_w=None): + result_y = np.array([p.get_y() for p in patches]) + result_height = np.array([p.get_height() for p in patches]) + result_x = np.array([p.get_x() for p in patches]) + result_width = np.array([p.get_width() for p in patches]) + + if expected_y is not None: + self.assert_numpy_array_equal(result_y, expected_y) + if expected_h is not None: + self.assert_numpy_array_equal(result_height, expected_h) + if expected_x is not None: + self.assert_numpy_array_equal(result_x, expected_x) + if expected_w is not None: + self.assert_numpy_array_equal(result_width, expected_w) + + @slow + def test_hist_df_coord(self): + normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]), + np.array([10, 9, 8, 7, 6])), + 'B': np.repeat(np.array([1, 2, 3, 4, 5]), + np.array([8, 8, 8, 8, 8])), + 'C': np.repeat(np.array([1, 2, 
3, 4, 5]), + np.array([6, 7, 8, 9, 10]))}, + columns=['A', 'B', 'C']) + + nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]), + np.array([3, 10, 9, 8, 7, 6])), + 'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]), + np.array([8, 3, 8, 8, 8, 8])), + 'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]), + np.array([6, 7, 8, 3, 9, 10]))}, + columns=['A', 'B', 'C']) + + for df in [normal_df, nan_df]: + ax = df.plot(kind='hist', bins=5) + self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6])) + self._check_box_coord(ax.patches[5:10], expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([8, 8, 8, 8, 8])) + self._check_box_coord(ax.patches[10:], expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([6, 7, 8, 9, 10])) + + ax = df.plot(kind='hist', bins=5, stacked=True) + self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6])) + self._check_box_coord(ax.patches[5:10], expected_y=np.array([10, 9, 8, 7, 6]), + expected_h=np.array([8, 8, 8, 8, 8])) + self._check_box_coord(ax.patches[10:], expected_y=np.array([18, 17, 16, 15, 14]), + expected_h=np.array([6, 7, 8, 9, 10])) + + axes = df.plot(kind='hist', bins=5, stacked=True, subplots=True) + self._check_box_coord(axes[0].patches, expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6])) + self._check_box_coord(axes[1].patches, expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([8, 8, 8, 8, 8])) + self._check_box_coord(axes[2].patches, expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([6, 7, 8, 9, 10])) + + # horizontal + ax = df.plot(kind='hist', bins=5, orientation='horizontal') + self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6])) + self._check_box_coord(ax.patches[5:10], expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([8, 8, 8, 8, 8])) + 
self._check_box_coord(ax.patches[10:], expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([6, 7, 8, 9, 10])) + + ax = df.plot(kind='hist', bins=5, stacked=True, orientation='horizontal') + self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6])) + self._check_box_coord(ax.patches[5:10], expected_x=np.array([10, 9, 8, 7, 6]), + expected_w=np.array([8, 8, 8, 8, 8])) + self._check_box_coord(ax.patches[10:], expected_x=np.array([18, 17, 16, 15, 14]), + expected_w=np.array([6, 7, 8, 9, 10])) + + axes = df.plot(kind='hist', bins=5, stacked=True, + subplots=True, orientation='horizontal') + self._check_box_coord(axes[0].patches, expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6])) + self._check_box_coord(axes[1].patches, expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([8, 8, 8, 8, 8])) + self._check_box_coord(axes[2].patches, expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([6, 7, 8, 9, 10])) + + @slow + def test_hist_df_legacy(self): _check_plot_works(self.hist_df.hist) # make sure layout is handled @@ -1849,7 +2002,7 @@ def test_plot_int_columns(self): @slow def test_df_legend_labels(self): - kinds = 'line', 'bar', 'barh', 'kde', 'area' + kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist'] df = DataFrame(rand(3, 3), columns=['a', 'b', 'c']) df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f']) df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i']) @@ -1927,7 +2080,7 @@ def test_legend_name(self): @slow def test_no_legend(self): - kinds = 'line', 'bar', 'barh', 'kde', 'area' + kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist'] df = DataFrame(rand(3, 3), columns=['a', 'b', 'c']) for kind in kinds: @@ -2019,6 +2172,56 @@ def test_area_colors(self): poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] self._check_colors(poly, facecolors=rgba_colors) + @slow + def test_hist_colors(self): + default_colors = 
self.plt.rcParams.get('axes.color_cycle') + + df = DataFrame(randn(5, 5)) + ax = df.plot(kind='hist') + self._check_colors(ax.patches[::10], facecolors=default_colors[:5]) + tm.close() + + custom_colors = 'rgcby' + ax = df.plot(kind='hist', color=custom_colors) + self._check_colors(ax.patches[::10], facecolors=custom_colors) + tm.close() + + from matplotlib import cm + # Test str -> colormap functionality + ax = df.plot(kind='hist', colormap='jet') + rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5)) + self._check_colors(ax.patches[::10], facecolors=rgba_colors) + tm.close() + + # Test colormap functionality + ax = df.plot(kind='hist', colormap=cm.jet) + rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5)) + self._check_colors(ax.patches[::10], facecolors=rgba_colors) + tm.close() + + ax = df.ix[:, [0]].plot(kind='hist', color='DodgerBlue') + self._check_colors([ax.patches[0]], facecolors=['DodgerBlue']) + + @slow + def test_kde_colors(self): + from matplotlib import cm + + custom_colors = 'rgcby' + df = DataFrame(rand(5, 5)) + + ax = df.plot(kind='kde', color=custom_colors) + self._check_colors(ax.get_lines(), linecolors=custom_colors) + tm.close() + + ax = df.plot(kind='kde', colormap='jet') + rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) + self._check_colors(ax.get_lines(), linecolors=rgba_colors) + tm.close() + + ax = df.plot(kind='kde', colormap=cm.jet) + rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) + self._check_colors(ax.get_lines(), linecolors=rgba_colors) + def test_default_color_cycle(self): import matplotlib.pyplot as plt plt.rcParams['axes.color_cycle'] = list('rgbk') diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 5d85b68234f96..7d0eaea5b36d6 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1359,58 +1359,6 @@ def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True): return errors -class KdePlot(MPLPlot): - orientation = 'vertical' - - def __init__(self, data, bw_method=None, 
ind=None, **kwargs): - MPLPlot.__init__(self, data, **kwargs) - self.bw_method=bw_method - self.ind=ind - - def _make_plot(self): - from scipy.stats import gaussian_kde - from scipy import __version__ as spv - from distutils.version import LooseVersion - plotf = self.plt.Axes.plot - colors = self._get_colors() - for i, (label, y) in enumerate(self._iter_data()): - ax = self._get_ax(i) - style = self._get_style(i, label) - - label = com.pprint_thing(label) - - if LooseVersion(spv) >= '0.11.0': - gkde = gaussian_kde(y, bw_method=self.bw_method) - else: - gkde = gaussian_kde(y) - if self.bw_method is not None: - msg = ('bw_method was added in Scipy 0.11.0.' + - ' Scipy version in use is %s.' % spv) - warnings.warn(msg) - - sample_range = max(y) - min(y) - - if self.ind is None: - ind = np.linspace(min(y) - 0.5 * sample_range, - max(y) + 0.5 * sample_range, 1000) - else: - ind = self.ind - - ax.set_ylabel("Density") - - y = gkde.evaluate(ind) - kwds = self.kwds.copy() - kwds['label'] = label - self._maybe_add_color(colors, kwds, style, i) - if style is None: - args = (ax, ind, y) - else: - args = (ax, ind, y, style) - - newlines = plotf(*args, **kwds) - self._add_legend_handle(newlines[0], label) - - class ScatterPlot(MPLPlot): def __init__(self, data, x, y, **kwargs): MPLPlot.__init__(self, data, **kwargs) @@ -1903,6 +1851,119 @@ def orientation(self): raise NotImplementedError(self.kind) +class HistPlot(LinePlot): + + def __init__(self, data, bins=10, bottom=0, **kwargs): + self.bins = bins # use mpl default + self.bottom = bottom + # Do not call LinePlot.__init__ which may fill nan + MPLPlot.__init__(self, data, **kwargs) + + def _args_adjust(self): + if com.is_integer(self.bins): + # create common bin edge + values = np.ravel(self.data.values) + values = values[~com.isnull(values)] + + hist, self.bins = np.histogram(values, bins=self.bins, + range=self.kwds.get('range', None), + weights=self.kwds.get('weights', None)) + + if com.is_list_like(self.bottom): + 
self.bottom = np.array(self.bottom) + + def _get_plot_function(self): + def plotf(ax, y, style=None, column_num=None, **kwds): + if column_num == 0: + self._initialize_prior(len(self.bins) - 1) + y = y[~com.isnull(y)] + bottom = self._pos_prior + self.bottom + # ignore style + n, bins, patches = self.plt.Axes.hist(ax, y, bins=self.bins, + bottom=bottom, **kwds) + self._update_prior(n) + return patches + return plotf + + def _make_plot(self): + plotf = self._get_plot_function() + colors = self._get_colors() + for i, (label, y) in enumerate(self._iter_data()): + ax = self._get_ax(i) + style = self._get_style(i, label) + label = com.pprint_thing(label) + + kwds = self.kwds.copy() + kwds['label'] = label + self._maybe_add_color(colors, kwds, style, i) + + if style is not None: + kwds['style'] = style + + artists = plotf(ax, y, column_num=i, **kwds) + self._add_legend_handle(artists[0], label) + + def _post_plot_logic(self): + if self.orientation == 'horizontal': + for ax in self.axes: + ax.set_xlabel('Degree') + else: + for ax in self.axes: + ax.set_ylabel('Degree') + + @property + def orientation(self): + if self.kwds.get('orientation', None) == 'horizontal': + return 'horizontal' + else: + return 'vertical' + + +class KdePlot(HistPlot): + orientation = 'vertical' + + def __init__(self, data, bw_method=None, ind=None, **kwargs): + MPLPlot.__init__(self, data, **kwargs) + self.bw_method = bw_method + self.ind = ind + + def _args_adjust(self): + pass + + def _get_ind(self, y): + if self.ind is None: + sample_range = max(y) - min(y) + ind = np.linspace(min(y) - 0.5 * sample_range, + max(y) + 0.5 * sample_range, 1000) + else: + ind = self.ind + return ind + + def _get_plot_function(self): + from scipy.stats import gaussian_kde + from scipy import __version__ as spv + f = MPLPlot._get_plot_function(self) + def plotf(ax, y, style=None, column_num=None, **kwds): + if LooseVersion(spv) >= '0.11.0': + gkde = gaussian_kde(y, bw_method=self.bw_method) + else: + gkde = 
gaussian_kde(y) + if self.bw_method is not None: + msg = ('bw_method was added in Scipy 0.11.0.' + + ' Scipy version in use is %s.' % spv) + warnings.warn(msg) + + ind = self._get_ind(y) + y = gkde.evaluate(ind) + lines = f(ax, ind, y, style=style, **kwds) + return lines + return plotf + + def _post_plot_logic(self): + for ax in self.axes: + ax.set_ylabel('Density') + + class PiePlot(MPLPlot): def __init__(self, data, kind=None, **kwargs): @@ -1964,11 +2025,8 @@ class BoxPlot(MPLPlot): pass -class HistPlot(MPLPlot): - pass - # kinds supported by both dataframe and series -_common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area'] +_common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area', 'hist'] # kinds supported by dataframe _dataframe_kinds = ['scatter', 'hexbin'] # kinds supported only by series or dataframe single column @@ -1976,7 +2034,7 @@ class HistPlot(MPLPlot): _all_kinds = _common_kinds + _dataframe_kinds + _series_kinds _plot_klass = {'line': LinePlot, 'bar': BarPlot, 'barh': BarPlot, - 'kde': KdePlot, + 'kde': KdePlot, 'hist': HistPlot, 'scatter': ScatterPlot, 'hexbin': HexBinPlot, 'area': AreaPlot, 'pie': PiePlot} @@ -2023,10 +2081,11 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, ax : matplotlib axis object, default None style : list or dict matplotlib line style per column - kind : {'line', 'bar', 'barh', 'kde', 'density', 'area', scatter', 'hexbin'} + kind : {'line', 'bar', 'barh', 'hist', 'kde', 'density', 'area', 'scatter', 'hexbin'} line : line plot bar : vertical bar plot barh : horizontal bar plot + hist : histogram kde/density : Kernel Density Estimation plot area : area plot scatter : scatter plot @@ -2170,10 +2229,11 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None, Parameters ---------- label : label argument to provide to plot - kind : {'line', 'bar', 'barh', 'kde', 'density', 'area'} + kind : {'line', 'bar', 'barh', 'hist', 'kde', 'density', 'area'} line : line plot 
bar : vertical bar plot barh : horizontal bar plot + hist : histogram kde/density : Kernel Density Estimation plot area : area plot use_index : boolean, default True
Because `hist` and `boxplot` are separated from normal `plot`, there are some inconsistencies with these functions. Looks better to include them to `MPLPlot` framework. Maybe `scatter` and `hist` can be deprecated in 0.15 if `MPLPlot` can offer better `GroupBy` plot (plan to do in separate PR). ### Example This allows to use `kind='hist` in `DataFrame.plot` and `Series.plot`. (No changes for `DataFrame.hist`) ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt df = pd.DataFrame(np.random.randn(1000, 5)) df.plot(kind='hist', subplots=True) ``` ![figure_1](https://cloud.githubusercontent.com/assets/1696302/3712412/16aa1896-1513-11e4-863d-e0b6d631c12f.png) ``` df.plot(kind='hist', stacked=True) ``` ![figure_2](https://cloud.githubusercontent.com/assets/1696302/3712413/1bded702-1513-11e4-9ff3-3947783d5213.png) ### Remaining Items - [x] Add a release note in API section detailing this change/enhancement - [x] Modify doc - [x] Add tests (both histogram and kde) - [x] Add support for `rot` and `fontsize` (depending on `orientation` kw) (rely on #7844) - [x] Add tests for xticklabels and yticklabels - [x] Add tests for colors - [x] Handling nan - [x] Check `stacked=True` can be supported (`DataFrame.hist` doesn't support it though..). - [x] Add tests for stacking
https://api.github.com/repos/pandas-dev/pandas/pulls/7809
2014-07-20T14:35:02Z
2014-08-11T15:30:26Z
2014-08-11T15:30:26Z
2014-08-12T02:00:00Z
Docs: Panel.dropna now exists, update docs accordingly.
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index 4f4e11e39ae48..d3024daaa59c9 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -267,9 +267,8 @@ data. To do this, use the **dropna** method: df.dropna(axis=1) df['one'].dropna() -**dropna** is presently only implemented for Series and DataFrame, but will be -eventually added to Panel. Series.dropna is a simpler method as it only has one -axis to consider. DataFrame.dropna has considerably more options, which can be +Series.dropna is a simpler method as it only has one axis to consider. +DataFrame.dropna has considerably more options than Series.dropna, which can be examined :ref:`in the API <api.dataframe.missing>`. .. _missing_data.interpolate:
https://api.github.com/repos/pandas-dev/pandas/pulls/7806
2014-07-19T13:57:47Z
2014-07-23T21:30:04Z
2014-07-23T21:30:04Z
2014-07-23T21:30:04Z
Docs: Be more specific about inf/-inf no longer being treated as nulls.
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index b0319c01b2737..4f4e11e39ae48 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -68,7 +68,7 @@ detect this value with data of different types: floating point, integer, boolean, and general object. In many cases, however, the Python ``None`` will arise and we wish to also consider that "missing" or "null". -Until recently, for legacy reasons ``inf`` and ``-inf`` were also +Prior to version v0.10.0 ``inf`` and ``-inf`` were also considered to be "null" in computations. This is no longer the case by default; use the ``mode.use_inf_as_null`` option to recover it.
https://api.github.com/repos/pandas-dev/pandas/pulls/7805
2014-07-19T12:01:20Z
2014-07-19T13:04:21Z
2014-07-19T13:04:21Z
2014-07-19T14:01:15Z
DOC: added nanosecond frequencies to doc
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index cbfb20c6f9d7d..05fd82b2f448d 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -700,6 +700,7 @@ frequencies. We will refer to these aliases as *offset aliases* "S", "secondly frequency" "L", "milliseonds" "U", "microseconds" + "N", "nanoseconds" Combining Aliases ~~~~~~~~~~~~~~~~~
https://api.github.com/repos/pandas-dev/pandas/pulls/7804
2014-07-19T11:00:15Z
2014-07-19T12:06:49Z
2014-07-19T12:06:49Z
2014-07-19T14:01:25Z
BUG: is_superperiod and is_subperiod cannot handle higher freq than S
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index ca24eb3f910ed..ccea9de8bcbcf 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -192,6 +192,7 @@ Bug Fixes +- Bug in ``is_superperiod`` and ``is_subperiod`` cannot handle higher frequencies than ``S`` (:issue:`7760`, :issue:`7772`, :issue:`7803`) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index fe61e5f0acd9b..073f6e13047e9 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -929,25 +929,31 @@ def is_subperiod(source, target): if _is_quarterly(source): return _quarter_months_conform(_get_rule_month(source), _get_rule_month(target)) - return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S'] + return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] elif _is_quarterly(target): - return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S'] + return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] elif target == 'M': - return source in ['D', 'C', 'B', 'H', 'T', 'S'] + return source in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] elif _is_weekly(target): - return source in [target, 'D', 'C', 'B', 'H', 'T', 'S'] + return source in [target, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] elif target == 'B': - return source in ['B', 'H', 'T', 'S'] + return source in ['B', 'H', 'T', 'S', 'L', 'U', 'N'] elif target == 'C': - return source in ['C', 'H', 'T', 'S'] + return source in ['C', 'H', 'T', 'S', 'L', 'U', 'N'] elif target == 'D': - return source in ['D', 'H', 'T', 'S'] + return source in ['D', 'H', 'T', 'S', 'L', 'U', 'N'] elif target == 'H': - return source in ['H', 'T', 'S'] + return source in ['H', 'T', 'S', 'L', 'U', 'N'] elif target == 'T': - return source in ['T', 'S'] + return source in ['T', 'S', 'L', 'U', 'N'] elif target == 'S': - return source in ['S'] + return source in ['S', 'L', 'U', 'N'] + elif target == 'L': + return source in ['L', 'U', 'N'] + elif target == 'U': + return source in ['U', 'N'] + elif 
target == 'N': + return source in ['N'] def is_superperiod(source, target): @@ -982,25 +988,31 @@ def is_superperiod(source, target): smonth = _get_rule_month(source) tmonth = _get_rule_month(target) return _quarter_months_conform(smonth, tmonth) - return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S'] + return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] elif _is_quarterly(source): - return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S'] + return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] elif source == 'M': - return target in ['D', 'C', 'B', 'H', 'T', 'S'] + return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] elif _is_weekly(source): - return target in [source, 'D', 'C', 'B', 'H', 'T', 'S'] + return target in [source, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] elif source == 'B': - return target in ['D', 'C', 'B', 'H', 'T', 'S'] + return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] elif source == 'C': - return target in ['D', 'C', 'B', 'H', 'T', 'S'] + return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] elif source == 'D': - return target in ['D', 'C', 'B', 'H', 'T', 'S'] + return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] elif source == 'H': - return target in ['H', 'T', 'S'] + return target in ['H', 'T', 'S', 'L', 'U', 'N'] elif source == 'T': - return target in ['T', 'S'] + return target in ['T', 'S', 'L', 'U', 'N'] elif source == 'S': - return target in ['S'] + return target in ['S', 'L', 'U', 'N'] + elif source == 'L': + return target in ['L', 'U', 'N'] + elif source == 'U': + return target in ['U', 'N'] + elif source == 'N': + return target in ['N'] def _get_rule_month(source, default='DEC'): diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py index 37371b5828c8c..10a8286f4bec9 100644 --- a/pandas/tseries/tests/test_frequencies.py +++ b/pandas/tseries/tests/test_frequencies.py @@ -342,6 +342,16 @@ def test_is_superperiod_subperiod(): 
assert(fmod.is_superperiod(offsets.Hour(), offsets.Minute())) assert(fmod.is_subperiod(offsets.Minute(), offsets.Hour())) + assert(fmod.is_superperiod(offsets.Second(), offsets.Milli())) + assert(fmod.is_subperiod(offsets.Milli(), offsets.Second())) + + assert(fmod.is_superperiod(offsets.Milli(), offsets.Micro())) + assert(fmod.is_subperiod(offsets.Micro(), offsets.Milli())) + + assert(fmod.is_superperiod(offsets.Micro(), offsets.Nano())) + assert(fmod.is_subperiod(offsets.Nano(), offsets.Micro())) + + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index 0bdba3751b6fd..5742b8e9bfaae 100644 --- a/pandas/tseries/tests/test_plotting.py +++ b/pandas/tseries/tests/test_plotting.py @@ -707,6 +707,28 @@ def test_from_weekly_resampling(self): for l in ax.get_lines(): self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + @slow + def test_mixed_freq_second_millisecond(self): + # GH 7772, GH 7760 + idxh = date_range('2014-07-01 09:00', freq='S', periods=50) + idxl = date_range('2014-07-01 09:00', freq='100L', periods=500) + high = Series(np.random.randn(len(idxh)), idxh) + low = Series(np.random.randn(len(idxl)), idxl) + # high to low + high.plot() + ax = low.plot() + self.assertEqual(len(ax.get_lines()), 2) + for l in ax.get_lines(): + self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L') + tm.close() + + # low to high + low.plot() + ax = high.plot() + self.assertEqual(len(ax.get_lines()), 2) + for l in ax.get_lines(): + self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L') + @slow def test_irreg_dtypes(self): # date
Closes #7772, Closes #7760.
https://api.github.com/repos/pandas-dev/pandas/pulls/7803
2014-07-19T10:50:15Z
2014-07-19T14:11:35Z
2014-07-19T14:11:35Z
2014-07-19T15:21:58Z
BUG: reset_index with MultiIndex contains PeriodIndex raises ValueError
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index ca24eb3f910ed..3ce193763779b 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -193,6 +193,8 @@ Bug Fixes +- Bug in ``DataFrame.reset_index`` which has ``MultiIndex`` contains ``PeriodIndex`` or ``DatetimeIndex`` with tz raises ``ValueError`` (:issue:`7746`, :issue:`7793`) + diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 17bef8dd28cf4..4f558dda756dd 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2452,13 +2452,13 @@ def _maybe_casted_values(index, labels=None): if values.dtype == np.object_: values = lib.maybe_convert_objects(values) - # if we have the labels, extract the values with a mask - if labels is not None: - mask = labels == -1 - values = values.take(labels) - if mask.any(): - values, changed = com._maybe_upcast_putmask(values, - mask, np.nan) + # if we have the labels, extract the values with a mask + if labels is not None: + mask = labels == -1 + values = values.take(labels) + if mask.any(): + values, changed = com._maybe_upcast_putmask(values, + mask, np.nan) return values new_index = np.arange(len(new_obj)) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index c0ca5451ef1d2..d8e17c4d1d290 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -2118,6 +2118,33 @@ def test_reset_index_datetime(self): expected['idx3'] = expected['idx3'].apply(lambda d: pd.Timestamp(d, tz='Europe/Paris')) assert_frame_equal(df.reset_index(), expected) + # GH 7793 + idx = pd.MultiIndex.from_product([['a','b'], pd.date_range('20130101', periods=3, tz=tz)]) + df = pd.DataFrame(np.arange(6).reshape(6,1), columns=['a'], index=idx) + + expected = pd.DataFrame({'level_0': 'a a a b b b'.split(), + 'level_1': [datetime.datetime(2013, 1, 1), + datetime.datetime(2013, 1, 2), + datetime.datetime(2013, 1, 3)] * 2, + 'a': np.arange(6, dtype='int64')}, + columns=['level_0', 'level_1', 'a']) + 
expected['level_1'] = expected['level_1'].apply(lambda d: pd.Timestamp(d, offset='D', tz=tz)) + assert_frame_equal(df.reset_index(), expected) + + def test_reset_index_period(self): + # GH 7746 + idx = pd.MultiIndex.from_product([pd.period_range('20130101', periods=3, freq='M'), + ['a','b','c']], names=['month', 'feature']) + + df = pd.DataFrame(np.arange(9).reshape(-1,1), index=idx, columns=['a']) + expected = pd.DataFrame({'month': [pd.Period('2013-01', freq='M')] * 3 + + [pd.Period('2013-02', freq='M')] * 3 + + [pd.Period('2013-03', freq='M')] * 3, + 'feature': ['a', 'b', 'c'] * 3, + 'a': np.arange(9, dtype='int64')}, + columns=['month', 'feature', 'a']) + assert_frame_equal(df.reset_index(), expected) + def test_set_index_period(self): # GH 6631 df = DataFrame(np.random.random(6))
Closes #7746, Closes #7793. Sorry, caused by #7533. The `ValueError` is being raised when `PeriodIndex` or `DatetimeIndex` with tz 's unique values are less than `MultiIndex` length.
https://api.github.com/repos/pandas-dev/pandas/pulls/7802
2014-07-19T09:10:42Z
2014-07-19T13:39:59Z
2014-07-19T13:39:59Z
2014-07-19T15:24:03Z
BUG: timeseries subplots may display unnecessary minor ticklabels
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 5e3f97944c243..5edc337a1c6a5 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -199,6 +199,7 @@ Bug Fixes +- Bug in ``DataFrame.plot`` with ``subplots=True`` may draw unnecessary minor xticks and yticks (:issue:`7801`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 00045e88ba2f0..f9ae058c065e3 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -126,11 +126,14 @@ def _check_visible(self, collections, visible=True): Parameters ---------- - collections : list-like - list or collection of target artist + collections : matplotlib Artist or its list-like + target Artist or its list or collection visible : bool expected visibility """ + from matplotlib.collections import Collection + if not isinstance(collections, Collection) and not com.is_list_like(collections): + collections = [collections] for patch in collections: self.assertEqual(patch.get_visible(), visible) @@ -861,9 +864,12 @@ def test_plot(self): axes = _check_plot_works(df.plot, subplots=True, title='blah') self._check_axes_shape(axes, axes_num=3, layout=(3, 1)) for ax in axes[:2]: + self._check_visible(ax.xaxis) # xaxis must be visible for grid self._check_visible(ax.get_xticklabels(), visible=False) + self._check_visible(ax.get_xticklabels(minor=True), visible=False) self._check_visible([ax.xaxis.get_label()], visible=False) for ax in [axes[2]]: + self._check_visible(ax.xaxis) self._check_visible(ax.get_xticklabels()) self._check_visible([ax.xaxis.get_label()]) @@ -1017,21 +1023,61 @@ def test_subplots(self): self._check_legend_labels(ax, labels=[com.pprint_thing(column)]) for ax in axes[:-2]: + self._check_visible(ax.xaxis) # xaxis must be visible for grid self._check_visible(ax.get_xticklabels(), visible=False) + self._check_visible(ax.get_xticklabels(minor=True), visible=False) + self._check_visible(ax.xaxis.get_label(), visible=False) 
self._check_visible(ax.get_yticklabels()) + self._check_visible(axes[-1].xaxis) self._check_visible(axes[-1].get_xticklabels()) + self._check_visible(axes[-1].get_xticklabels(minor=True)) + self._check_visible(axes[-1].xaxis.get_label()) self._check_visible(axes[-1].get_yticklabels()) axes = df.plot(kind=kind, subplots=True, sharex=False) for ax in axes: + self._check_visible(ax.xaxis) self._check_visible(ax.get_xticklabels()) + self._check_visible(ax.get_xticklabels(minor=True)) + self._check_visible(ax.xaxis.get_label()) self._check_visible(ax.get_yticklabels()) axes = df.plot(kind=kind, subplots=True, legend=False) for ax in axes: self.assertTrue(ax.get_legend() is None) + @slow + def test_subplots_timeseries(self): + idx = date_range(start='2014-07-01', freq='M', periods=10) + df = DataFrame(np.random.rand(10, 3), index=idx) + + for kind in ['line', 'area']: + axes = df.plot(kind=kind, subplots=True, sharex=True) + self._check_axes_shape(axes, axes_num=3, layout=(3, 1)) + + for ax in axes[:-2]: + # GH 7801 + self._check_visible(ax.xaxis) # xaxis must be visible for grid + self._check_visible(ax.get_xticklabels(), visible=False) + self._check_visible(ax.get_xticklabels(minor=True), visible=False) + self._check_visible(ax.xaxis.get_label(), visible=False) + self._check_visible(ax.get_yticklabels()) + + self._check_visible(axes[-1].xaxis) + self._check_visible(axes[-1].get_xticklabels()) + self._check_visible(axes[-1].get_xticklabels(minor=True)) + self._check_visible(axes[-1].xaxis.get_label()) + self._check_visible(axes[-1].get_yticklabels()) + + axes = df.plot(kind=kind, subplots=True, sharex=False) + for ax in axes: + self._check_visible(ax.xaxis) + self._check_visible(ax.get_xticklabels()) + self._check_visible(ax.get_xticklabels(minor=True)) + self._check_visible(ax.xaxis.get_label()) + self._check_visible(ax.get_yticklabels()) + def test_negative_log(self): df = - DataFrame(rand(6, 4), index=list(string.ascii_letters[:6]), diff --git 
a/pandas/tools/plotting.py b/pandas/tools/plotting.py index d3ea809b79b76..3570b605c714e 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2972,16 +2972,35 @@ def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze= axarr[i] = ax if nplots > 1: + if sharex and nrows > 1: for ax in axarr[:naxes][:-ncols]: # only bottom row for label in ax.get_xticklabels(): label.set_visible(False) + try: + # set_visible will not be effective if + # minor axis has NullLocator and NullFormattor (default) + import matplotlib.ticker as ticker + ax.xaxis.set_minor_locator(ticker.AutoLocator()) + ax.xaxis.set_minor_formatter(ticker.FormatStrFormatter('')) + for label in ax.get_xticklabels(minor=True): + label.set_visible(False) + except Exception: # pragma no cover + pass ax.xaxis.get_label().set_visible(False) if sharey and ncols > 1: for i, ax in enumerate(axarr): if (i % ncols) != 0: # only first column for label in ax.get_yticklabels(): label.set_visible(False) + try: + import matplotlib.ticker as ticker + ax.yaxis.set_minor_locator(ticker.AutoLocator()) + ax.yaxis.set_minor_formatter(ticker.FormatStrFormatter('')) + for label in ax.get_yticklabels(minor=True): + label.set_visible(False) + except Exception: # pragma no cover + pass ax.yaxis.get_label().set_visible(False) if naxes != nplots:
Related to #7457. The fix was incomplete because it only hides major ticklabels, not minor ticklabels. This causes incorrect result in time-series plot. (And another problem is `rot` default is not applied to minor ticks. I'll check this separatelly) ``` df = pd.DataFrame(np.random.randn(10, 4), index=pd.date_range(start='2014-07-01', freq='M', periods=10)) df.plot(subplots=True) ``` ### Current result: Minor ticklabels of top 3 axes are not hidden. ![figure_ng](https://cloud.githubusercontent.com/assets/1696302/3634314/36ebc68c-0f22-11e4-9f9c-da803dd27d5e.png) ### Result after fix: ![figure_ok](https://cloud.githubusercontent.com/assets/1696302/3634865/57a28f1a-0f4e-11e4-847f-fff225740439.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/7801
2014-07-19T08:57:16Z
2014-07-22T11:38:59Z
2014-07-22T11:38:59Z
2014-07-23T11:08:36Z
Correct docs structure in indexing docs.
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 495d97f340d31..837e3b386f3d0 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -2170,7 +2170,7 @@ add an index after you've already done so. There are a couple of different ways. Add an index using DataFrame columns ------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. _indexing.set_index: @@ -2213,7 +2213,7 @@ the index in-place (without creating a new object): data Remove / reset the index, ``reset_index`` ------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As a convenience, there is a new function on DataFrame called ``reset_index`` which transfers the index values into the DataFrame's columns and sets a simple @@ -2244,7 +2244,7 @@ discards the index, instead of putting index values in the DataFrame's columns. deprecated. Adding an ad hoc index ----------------------- +~~~~~~~~~~~~~~~~~~~~~~ If you create an index yourself, you can just assign it to the ``index`` field:
https://api.github.com/repos/pandas-dev/pandas/pulls/7800
2014-07-19T07:55:11Z
2014-07-19T12:07:37Z
2014-07-19T12:07:37Z
2014-07-19T12:07:41Z
BUG: tslib.tz_convert and tslib.tz_convert_single may output different result in DST
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index dc8ed4c9f5aac..109ed8b286c22 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -233,8 +233,8 @@ Enhancements - - +- Bug in ``tslib.tz_convert`` and ``tslib.tz_convert_single`` may return different results (:issue:`7798`) +- Bug in ``DatetimeIndex.intersection`` of non-overlapping timestamps with tz raises ``IndexError`` (:issue:`7880`) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 1b5baf1bfe9da..88a86da27daf9 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -12636,23 +12636,6 @@ def test_consolidate_datetime64(self): assert_array_equal(df.starting.values, ser_starting.index.values) assert_array_equal(df.ending.values, ser_ending.index.values) - def test_tslib_tz_convert_trans_pos_plus_1__bug(self): - # Regression test for tslib.tz_convert(vals, tz1, tz2). - # See https://github.com/pydata/pandas/issues/4496 for details. - idx = pd.date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq='1min') - idx = idx.tz_localize('UTC') - idx = idx.tz_convert('Europe/Moscow') - - test_vector = pd.Series([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 5], dtype=int) - - hours = idx.hour - - np.testing.assert_equal(hours, test_vector.values) - def _check_bool_op(self, name, alternative, frame=None, has_skipna=True, has_bool_only=False): if frame is None: diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 9bbcc781ca9d6..edc7b075da6f8 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -683,25 +683,6 @@ def infer_freq(index, warn=True): _ONE_HOUR = 60 * 
_ONE_MINUTE _ONE_DAY = 24 * _ONE_HOUR -def _tz_convert_with_transitions(values, to_tz, from_tz): - """ - convert i8 values from the specificed timezone to the to_tz zone, taking - into account DST transitions - """ - - # vectorization is slow, so tests if we can do this via the faster tz_convert - f = lambda x: tslib.tz_convert_single(x, to_tz, from_tz) - - if len(values) > 2: - first_slow, last_slow = f(values[0]),f(values[-1]) - - first_fast, last_fast = tslib.tz_convert(np.array([values[0],values[-1]],dtype='i8'),to_tz,from_tz) - - # don't cross a DST, so ok - if first_fast == first_slow and last_fast == last_slow: - return tslib.tz_convert(values,to_tz,from_tz) - - return np.vectorize(f)(values) class _FrequencyInferer(object): """ @@ -713,7 +694,7 @@ def __init__(self, index, warn=True): self.values = np.asarray(index).view('i8') if index.tz is not None: - self.values = _tz_convert_with_transitions(self.values,'UTC',index.tz) + self.values = tslib.tz_convert(self.values, 'UTC', index.tz) self.warn = warn diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 518bb4180ec89..5f7c93d38653a 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -14,7 +14,7 @@ from pandas.compat import u from pandas.tseries.frequencies import ( infer_freq, to_offset, get_period_alias, - Resolution, _tz_convert_with_transitions) + Resolution) from pandas.core.base import DatetimeIndexOpsMixin from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay from pandas.tseries.tools import parse_time_string, normalize_date @@ -1569,7 +1569,7 @@ def insert(self, loc, item): new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)], self[loc:].asi8)) if self.tz is not None: - new_dates = _tz_convert_with_transitions(new_dates,'UTC',self.tz) + new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz) return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz) except (AttributeError, TypeError): @@ -1606,7 +1606,7 @@ def 
delete(self, loc): freq = self.freq if self.tz is not None: - new_dates = _tz_convert_with_transitions(new_dates, 'UTC', self.tz) + new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz) return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz) def _view_like(self, ndarray): diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 9d5f45735feb5..c54c133dd2afe 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -3203,8 +3203,8 @@ def test_union(self): def test_intersection(self): # GH 4690 (with tz) - for tz in [None, 'Asia/Tokyo']: - rng = date_range('6/1/2000', '6/30/2000', freq='D', name='idx') + for tz in [None, 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']: + base = date_range('6/1/2000', '6/30/2000', freq='D', name='idx') # if target has the same name, it is preserved rng2 = date_range('5/15/2000', '6/20/2000', freq='D', name='idx') @@ -3214,16 +3214,18 @@ def test_intersection(self): rng3 = date_range('5/15/2000', '6/20/2000', freq='D', name='other') expected3 = date_range('6/1/2000', '6/20/2000', freq='D', name=None) - result2 = rng.intersection(rng2) - result3 = rng.intersection(rng3) - for (result, expected) in [(result2, expected2), (result3, expected3)]: + rng4 = date_range('7/1/2000', '7/31/2000', freq='D', name='idx') + expected4 = DatetimeIndex([], name='idx') + + for (rng, expected) in [(rng2, expected2), (rng3, expected3), (rng4, expected4)]: + result = base.intersection(rng) self.assertTrue(result.equals(expected)) self.assertEqual(result.name, expected.name) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.tz, expected.tz) # non-monotonic - rng = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-02', '2011-01-03'], + base = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-02', '2011-01-03'], tz=tz, name='idx') rng2 = DatetimeIndex(['2011-01-04', '2011-01-02', '2011-02-02', '2011-02-03'], @@ -3234,10 +3236,12 @@ 
def test_intersection(self): tz=tz, name='other') expected3 = DatetimeIndex(['2011-01-04', '2011-01-02'], tz=tz, name=None) - result2 = rng.intersection(rng2) - result3 = rng.intersection(rng3) - for (result, expected) in [(result2, expected2), (result3, expected3)]: - print(result, expected) + # GH 7880 + rng4 = date_range('7/1/2000', '7/31/2000', freq='D', tz=tz, name='idx') + expected4 = DatetimeIndex([], tz=tz, name='idx') + + for (rng, expected) in [(rng2, expected2), (rng3, expected3), (rng4, expected4)]: + result = base.intersection(rng) self.assertTrue(result.equals(expected)) self.assertEqual(result.name, expected.name) self.assertIsNone(result.freq) diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index 21f915cb50e21..ab969f13289ac 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -787,6 +787,64 @@ def test_utc_with_system_utc(self): # check that the time hasn't changed. self.assertEqual(ts, ts.tz_convert(dateutil.tz.tzutc())) + def test_tslib_tz_convert_trans_pos_plus_1__bug(self): + # Regression test for tslib.tz_convert(vals, tz1, tz2). + # See https://github.com/pydata/pandas/issues/4496 for details. 
+ for freq, n in [('H', 1), ('T', 60), ('S', 3600)]: + idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq) + idx = idx.tz_localize('UTC') + idx = idx.tz_convert('Europe/Moscow') + + expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1])) + self.assert_numpy_array_equal(idx.hour, expected) + + def test_tslib_tz_convert_dst(self): + for freq, n in [('H', 1), ('T', 60), ('S', 3600)]: + # Start DST + idx = date_range('2014-03-08 23:00', '2014-03-09 09:00', freq=freq, tz='UTC') + idx = idx.tz_convert('US/Eastern') + expected = np.repeat(np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]), + np.array([n, n, n, n, n, n, n, n, n, n, 1])) + self.assert_numpy_array_equal(idx.hour, expected) + + idx = date_range('2014-03-08 18:00', '2014-03-09 05:00', freq=freq, tz='US/Eastern') + idx = idx.tz_convert('UTC') + expected = np.repeat(np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + np.array([n, n, n, n, n, n, n, n, n, n, 1])) + self.assert_numpy_array_equal(idx.hour, expected) + + # End DST + idx = date_range('2014-11-01 23:00', '2014-11-02 09:00', freq=freq, tz='UTC') + idx = idx.tz_convert('US/Eastern') + expected = np.repeat(np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]), + np.array([n, n, n, n, n, n, n, n, n, n, 1])) + self.assert_numpy_array_equal(idx.hour, expected) + + idx = date_range('2014-11-01 18:00', '2014-11-02 05:00', freq=freq, tz='US/Eastern') + idx = idx.tz_convert('UTC') + expected = np.repeat(np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), + np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1])) + self.assert_numpy_array_equal(idx.hour, expected) + + # daily + # Start DST + idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D', tz='UTC') + idx = idx.tz_convert('US/Eastern') + self.assert_numpy_array_equal(idx.hour, np.array([19, 19])) + + idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D', tz='US/Eastern') + idx = idx.tz_convert('UTC') + self.assert_numpy_array_equal(idx.hour, np.array([5, 5])) + 
+ # End DST + idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D', tz='UTC') + idx = idx.tz_convert('US/Eastern') + self.assert_numpy_array_equal(idx.hour, np.array([20, 20])) + + idx = date_range('2014-11-01 00:00', '2014-11-02 000:00', freq='D', tz='US/Eastern') + idx = idx.tz_convert('UTC') + self.assert_numpy_array_equal(idx.hour, np.array([4, 4])) + class TestTimeZoneCacheKey(tm.TestCase): def test_cache_keys_are_distinct_for_pytz_vs_dateutil(self): diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index a47d6a178f8b2..79eaa97d50322 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -425,6 +425,44 @@ def test_period_ordinal_business_day(self): # Tuesday self.assertEqual(11418, period_ordinal(2013, 10, 8, 0, 0, 0, 0, 0, get_freq('B'))) + def test_tslib_tz_convert(self): + def compare_utc_to_local(tz_didx, utc_didx): + f = lambda x: tslib.tz_convert_single(x, 'UTC', tz_didx.tz) + result = tslib.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz) + result_single = np.vectorize(f)(tz_didx.asi8) + self.assert_numpy_array_equal(result, result_single) + + def compare_local_to_utc(tz_didx, utc_didx): + f = lambda x: tslib.tz_convert_single(x, tz_didx.tz, 'UTC') + result = tslib.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC') + result_single = np.vectorize(f)(utc_didx.asi8) + self.assert_numpy_array_equal(result, result_single) + + for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'Europe/Moscow']: + # US: 2014-03-09 - 2014-11-11 + # MOSCOW: 2014-10-26 / 2014-12-31 + tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz) + utc_didx = date_range('2014-03-01', '2015-01-10', freq='H') + compare_utc_to_local(tz_didx, utc_didx) + # local tz to UTC can be differ in hourly (or higher) freqs because of DST + compare_local_to_utc(tz_didx, utc_didx) + + tz_didx = date_range('2000-01-01', '2020-01-01', freq='D', tz=tz) + utc_didx = date_range('2000-01-01', '2020-01-01', freq='D') + 
compare_utc_to_local(tz_didx, utc_didx) + compare_local_to_utc(tz_didx, utc_didx) + + tz_didx = date_range('2000-01-01', '2100-01-01', freq='A', tz=tz) + utc_didx = date_range('2000-01-01', '2100-01-01', freq='A') + compare_utc_to_local(tz_didx, utc_didx) + compare_local_to_utc(tz_didx, utc_didx) + + # Check empty array + result = tslib.tz_convert(np.array([], dtype=np.int64), + tslib.maybe_get_tz('US/Eastern'), + tslib.maybe_get_tz('Asia/Tokyo')) + self.assert_numpy_array_equal(result, np.array([], dtype=np.int64)) + class TestTimestampOps(tm.TestCase): def test_timestamp_and_datetime(self): self.assertEqual((Timestamp(datetime.datetime(2013, 10, 13)) - datetime.datetime(2013, 10, 12)).days, 1) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index dc9f3fa258985..b8342baae16bd 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1907,10 +1907,14 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): Py_ssize_t i, pos, n = len(vals) int64_t v, offset pandas_datetimestruct dts + Py_ssize_t trans_len if not have_pytz: import pytz + if len(vals) == 0: + return np.array([], dtype=np.int64) + # Convert to UTC if _get_zone(tz1) != 'UTC': @@ -1927,6 +1931,7 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): else: deltas = _get_deltas(tz1) trans = _get_transitions(tz1) + trans_len = len(trans) pos = trans.searchsorted(vals[0]) - 1 if pos < 0: raise ValueError('First time before start of DST info') @@ -1934,7 +1939,7 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): offset = deltas[pos] for i in range(n): v = vals[i] - if v >= [pos + 1]: + while pos + 1 < trans_len and v >= trans[pos + 1]: pos += 1 offset = deltas[pos] utc_dates[i] = v - offset @@ -1957,29 +1962,23 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): # Convert UTC to other timezone trans = _get_transitions(tz2) + trans_len = len(trans) deltas = _get_deltas(tz2) - pos = trans.searchsorted(utc_dates[0]) - if pos == 0: + pos = 
trans.searchsorted(utc_dates[0]) - 1 + if pos < 0: raise ValueError('First time before start of DST info') - elif pos == len(trans): - return utc_dates + deltas[-1] # TODO: this assumed sortedness :/ - pos -= 1 - offset = deltas[pos] - cdef Py_ssize_t trans_len = len(trans) - for i in range(n): v = utc_dates[i] if vals[i] == NPY_NAT: result[i] = vals[i] else: - if (pos + 1) < trans_len and v >= trans[pos + 1]: + while pos + 1 < trans_len and v >= trans[pos + 1]: pos += 1 offset = deltas[pos] result[i] = v + offset - return result def tz_convert_single(int64_t val, object tz1, object tz2): @@ -2005,7 +2004,7 @@ def tz_convert_single(int64_t val, object tz1, object tz2): elif _get_zone(tz1) != 'UTC': deltas = _get_deltas(tz1) trans = _get_transitions(tz1) - pos = trans.searchsorted(val) - 1 + pos = trans.searchsorted(val, side='right') - 1 if pos < 0: raise ValueError('First time before start of DST info') offset = deltas[pos] @@ -2024,7 +2023,7 @@ def tz_convert_single(int64_t val, object tz1, object tz2): # Convert UTC to other timezone trans = _get_transitions(tz2) deltas = _get_deltas(tz2) - pos = trans.searchsorted(utc_date) - 1 + pos = trans.searchsorted(utc_date, side='right') - 1 if pos < 0: raise ValueError('First time before start of DST info')
These functions may return different result in case of DST. There seems to be 2 problems: - `tslib.tz_convert` checks DST and change `deltas` by adjusting `pos` by 1. If input has time-gaps more than 2 DST spans, result will be incorrect. - `tslib.tz_convert_single` results incorrect if input is just on DST edge. ``` import pandas as pd import numpy as np import datetime import pytz idx = pd.date_range('2014-03-01', '2015-01-10', freq='H') f = lambda x: pd.tslib.tz_convert_single(x, pd.tslib.maybe_get_tz('US/Eastern'), 'UTC') result = pd.tslib.tz_convert(idx.asi8, pd.tslib.maybe_get_tz('US/Eastern'), 'UTC') result_single = np.vectorize(f)(idx.asi8) result[result != result_single] # [1394370000000000000 1394373600000000000 1394377200000000000 ..., # 1414918800000000000 1414922400000000000 1414926000000000000] ``` #### Note Additionally, it was modifed to close #7880 also.
https://api.github.com/repos/pandas-dev/pandas/pulls/7798
2014-07-18T22:36:18Z
2014-08-03T01:41:58Z
2014-08-03T01:41:58Z
2014-08-04T13:18:25Z
BUG: fix reading pre-0.14.1 pickles of containers with one block and dup items
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 5e3f97944c243..103ac2a34a49a 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -191,6 +191,8 @@ Bug Fixes - Bug in pickles contains ``DateOffset`` may raise ``AttributeError`` when ``normalize`` attribute is reffered internally (:issue:`7748`) +- Bug in pickle deserialization that failed for pre-0.14.1 containers with dup items trying to avoid ambiguity + when matching block and manager items, when there's only one block there's no ambiguity (:issue:`7794`) - Bug in ``is_superperiod`` and ``is_subperiod`` cannot handle higher frequencies than ``S`` (:issue:`7760`, :issue:`7772`, :issue:`7803`) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index f649baeb16278..cad7b579aa554 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2271,10 +2271,23 @@ def unpickle_block(values, mgr_locs): ax_arrays, bvalues, bitems = state[:3] self.axes = [_ensure_index(ax) for ax in ax_arrays] + + if len(bitems) == 1 and self.axes[0].equals(bitems[0]): + # This is a workaround for pre-0.14.1 pickles that didn't + # support unpickling multi-block frames/panels with non-unique + # columns/items, because given a manager with items ["a", "b", + # "a"] there's no way of knowing which block's "a" is where. + # + # Single-block case can be supported under the assumption that + # block items corresponded to manager items 1-to-1. 
+ all_mgr_locs = [slice(0, len(bitems[0]))] + else: + all_mgr_locs = [self.axes[0].get_indexer(blk_items) + for blk_items in bitems] + self.blocks = tuple( - unpickle_block(values, - self.axes[0].get_indexer(items)) - for values, items in zip(bvalues, bitems)) + unpickle_block(values, mgr_locs) + for values, mgr_locs in zip(bvalues, all_mgr_locs)) self._post_setstate() diff --git a/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_linux_2.7.8.pickle b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_linux_2.7.8.pickle new file mode 100644 index 0000000000000..3ffecb77ef8c9 Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_linux_2.7.8.pickle differ diff --git a/pandas/io/tests/data/legacy_pickle/0.14.0/0.14.0_x86_64_linux_2.7.8.pickle b/pandas/io/tests/data/legacy_pickle/0.14.0/0.14.0_x86_64_linux_2.7.8.pickle new file mode 100644 index 0000000000000..19cbcddc4ded8 Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.14.0/0.14.0_x86_64_linux_2.7.8.pickle differ diff --git a/pandas/io/tests/data/legacy_pickle/0.14.1/0.14.1_x86_64_linux_2.7.8.pickle b/pandas/io/tests/data/legacy_pickle/0.14.1/0.14.1_x86_64_linux_2.7.8.pickle new file mode 100644 index 0000000000000..af530fcd3fb39 Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.14.1/0.14.1_x86_64_linux_2.7.8.pickle differ diff --git a/pandas/io/tests/generate_legacy_pickles.py b/pandas/io/tests/generate_legacy_pickles.py index 3a0386c7660d4..b20a1e5b60b86 100644 --- a/pandas/io/tests/generate_legacy_pickles.py +++ b/pandas/io/tests/generate_legacy_pickles.py @@ -1,6 +1,7 @@ """ self-contained to write legacy pickle files """ from __future__ import print_function + def _create_sp_series(): import numpy as np @@ -53,6 +54,7 @@ def _create_sp_frame(): def create_data(): """ create the pickle data """ + from distutils.version import LooseVersion import numpy as np import pandas from pandas import (Series,TimeSeries,DataFrame,Panel, @@ -92,13 
+94,23 @@ def create_data(): index=MultiIndex.from_tuples(tuple(zip(*[['bar','bar','baz','baz','baz'], ['one','two','one','two','three']])), names=['first','second'])), - dup = DataFrame(np.arange(15).reshape(5, 3).astype(np.float64), - columns=['A', 'B', 'A'])) + dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64), + columns=['A', 'B', 'A'])) panel = dict(float = Panel(dict(ItemA = frame['float'], ItemB = frame['float']+1)), dup = Panel(np.arange(30).reshape(3, 5, 2).astype(np.float64), items=['A', 'B', 'A'])) + if LooseVersion(pandas.__version__) >= '0.14.1': + # Pre-0.14.1 versions generated non-unpicklable mixed-type frames and + # panels if their columns/items were non-unique. + mixed_dup_df = DataFrame(data) + mixed_dup_df.columns = list("ABCDA") + + mixed_dup_panel = Panel(dict(ItemA=frame['float'], ItemB=frame['int'])) + mixed_dup_panel.items = ['ItemA', 'ItemA'] + frame['mixed_dup'] = mixed_dup_df + panel['mixed_dup'] = mixed_dup_panel return dict( series = series, frame = frame,
Series, frames and panels that contain only one block can be unpickled under the assumption that block items correspond to manager items 1-to-1 (as pointed out in #7329). I still don't have any of darwin/win/32bit platforms at hand, so I cannot post those pickles. Also, my linux has a fresher python than indicated by filenames of existing pickle test data. I did generate some data to test the fix, the question is do you want me to upload it?
https://api.github.com/repos/pandas-dev/pandas/pulls/7794
2014-07-18T17:45:02Z
2014-07-21T11:42:08Z
2014-07-21T11:42:08Z
2014-07-29T10:04:36Z
BUG: read_column did not preserve UTC tzinfo
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index f5926c2d011ee..06c93541a7783 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -196,6 +196,7 @@ Bug Fixes - Bug in Series 0-division with a float and integer operand dtypes (:issue:`7785`) - Bug in ``Series.astype("unicode")`` not calling ``unicode`` on the values correctly (:issue:`7758`) - Bug in ``DataFrame.as_matrix()`` with mixed ``datetime64[ns]`` and ``timedelta64[ns]`` dtypes (:issue:`7778`) +- Bug in ``HDFStore.select_column()`` not preserving UTC timezone info when selecting a DatetimeIndex (:issue:`7777`) - Bug in pickles contains ``DateOffset`` may raise ``AttributeError`` when ``normalize`` attribute is reffered internally (:issue:`7748`) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index cecbb407d0bd1..c130ed4fc52ba 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -62,6 +62,18 @@ def _ensure_encoding(encoding): encoding = _default_encoding return encoding +def _set_tz(values, tz, preserve_UTC=False): + """ set the timezone if values are an Index """ + if tz is not None and isinstance(values, Index): + tz = _ensure_decoded(tz) + if values.tz is None: + values = values.tz_localize('UTC').tz_convert(tz) + if preserve_UTC: + if tslib.get_timezone(tz) == 'UTC': + values = list(values) + + return values + Term = Expr @@ -1464,11 +1476,7 @@ def convert(self, values, nan_rep, encoding): kwargs['freq'] = None self.values = Index(values, **kwargs) - # set the timezone if indicated - # we stored in utc, so reverse to local timezone - if self.tz is not None: - self.values = self.values.tz_localize( - 'UTC').tz_convert(_ensure_decoded(self.tz)) + self.values = _set_tz(self.values, self.tz) return self @@ -3443,8 +3451,11 @@ def read_column(self, column, where=None, start=None, stop=None, **kwargs): # column must be an indexable or a data column c = getattr(self.table.cols, column) a.set_info(self.info) - return Series(a.convert(c[start:stop], 
nan_rep=self.nan_rep, - encoding=self.encoding).take_data()) + return Series(_set_tz(a.convert(c[start:stop], + nan_rep=self.nan_rep, + encoding=self.encoding + ).take_data(), + a.tz, True)) raise KeyError("column [%s] not found in the table" % column) diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index c602e8ff1a888..8d7f007f0bda7 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -4299,6 +4299,38 @@ def test_tseries_indices_frame(self): self.assertEqual(type(result.index), type(df.index)) self.assertEqual(result.index.freq, df.index.freq) + def test_tseries_select_index_column(self): + # GH7777 + # selecting a UTC datetimeindex column did + # not preserve UTC tzinfo set before storing + + # check that no tz still works + rng = date_range('1/1/2000', '1/30/2000') + frame = DataFrame(np.random.randn(len(rng), 4), index=rng) + + with ensure_clean_store(self.path) as store: + store.append('frame', frame) + result = store.select_column('frame', 'index') + self.assertEqual(rng.tz, DatetimeIndex(result.values).tz) + + # check utc + rng = date_range('1/1/2000', '1/30/2000', tz='UTC') + frame = DataFrame(np.random.randn(len(rng), 4), index=rng) + + with ensure_clean_store(self.path) as store: + store.append('frame', frame) + result = store.select_column('frame', 'index') + self.assertEqual(rng.tz, DatetimeIndex(result.values).tz) + + # double check non-utc + rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern') + frame = DataFrame(np.random.randn(len(rng), 4), index=rng) + + with ensure_clean_store(self.path) as store: + store.append('frame', frame) + result = store.select_column('frame', 'index') + self.assertEqual(rng.tz, DatetimeIndex(result.values).tz) + def test_unicode_index(self): unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
BUG: Fixes #7777, HDFStore.read_column did not preserve timezone information when fetching a DatetimeIndex column with tz=UTC
https://api.github.com/repos/pandas-dev/pandas/pulls/7790
2014-07-18T16:00:34Z
2014-07-22T15:24:22Z
2014-07-22T15:24:22Z
2014-07-22T15:26:00Z
BUG/COMPAT: pickled dtindex with freq raises AttributeError in normalize...
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index eb58f46f0f3fe..226ec28089b60 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -189,6 +189,7 @@ Bug Fixes - Bug in ``DataFrame.as_matrix()`` with mixed ``datetime64[ns]`` and ``timedelta64[ns]`` dtypes (:issue:`7778`) +- Bug in pickles contains ``DateOffset`` may raise ``AttributeError`` when ``normalize`` attribute is reffered internally (:issue:`7748`) diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index c52a405fe81ea..07d576ac1c8ae 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -48,9 +48,12 @@ def compare(self, vf): # py3 compat when reading py2 pickle try: data = pandas.read_pickle(vf) - except (ValueError) as detail: - # trying to read a py3 pickle in py2 - return + except (ValueError) as e: + if 'unsupported pickle protocol:' in str(e): + # trying to read a py3 pickle in py2 + return + else: + raise for typ, dv in data.items(): for dt, result in dv.items(): @@ -60,6 +63,7 @@ def compare(self, vf): continue self.compare_element(typ, result, expected) + return data def read_pickles(self, version): if not is_little_endian(): @@ -68,7 +72,14 @@ def read_pickles(self, version): pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version))) for f in os.listdir(pth): vf = os.path.join(pth,f) - self.compare(vf) + data = self.compare(vf) + + if data is None: + continue + + if 'series' in data: + if 'ts' in data['series']: + self._validate_timeseries(data['series']['ts'], self.data['series']['ts']) def test_read_pickles_0_10_1(self): self.read_pickles('0.10.1') @@ -82,6 +93,9 @@ def test_read_pickles_0_12_0(self): def test_read_pickles_0_13_0(self): self.read_pickles('0.13.0') + def test_read_pickles_0_14_0(self): + self.read_pickles('0.14.0') + def test_round_trip_current(self): for typ, dv in self.data.items(): @@ -94,6 +108,14 @@ def test_round_trip_current(self): result = pd.read_pickle(path) 
self.compare_element(typ, result, expected) + def _validate_timeseries(self, pickled, current): + # GH 7748 + tm.assert_series_equal(pickled, current) + self.assertEqual(pickled.index.freq, current.index.freq) + self.assertEqual(pickled.index.freq.normalize, False) + self.assert_numpy_array_equal(pickled > 0, current > 0) + + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 57181b43df9f6..8f77f88910a3c 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -130,6 +130,9 @@ def __add__(date): _cacheable = False _normalize_cache = True + # default for prior pickles + normalize = False + def __init__(self, n=1, normalize=False, **kwds): self.n = int(n) self.normalize = normalize diff --git a/setup.py b/setup.py index 3ec992d91bb45..844f5742c0e69 100755 --- a/setup.py +++ b/setup.py @@ -578,6 +578,7 @@ def pxd(name): 'tests/data/legacy_pickle/0.11.0/*.pickle', 'tests/data/legacy_pickle/0.12.0/*.pickle', 'tests/data/legacy_pickle/0.13.0/*.pickle', + 'tests/data/legacy_pickle/0.14.0/*.pickle', 'tests/data/*.csv', 'tests/data/*.dta', 'tests/data/*.txt',
Closes #7748.
https://api.github.com/repos/pandas-dev/pandas/pulls/7789
2014-07-18T15:58:00Z
2014-07-19T12:10:05Z
2014-07-19T12:10:05Z
2014-07-19T13:13:34Z
Raise exception on non-unique column index in to_hdf for fixed format.
diff --git a/doc/source/io.rst b/doc/source/io.rst index a363d144b2ba1..91ffb5091e927 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2311,7 +2311,8 @@ Fixed Format The examples above show storing using ``put``, which write the HDF5 to ``PyTables`` in a fixed array format, called the ``fixed`` format. These types of stores are are **not** appendable once written (though you can simply remove them and rewrite). Nor are they **queryable**; they must be -retrieved in their entirety. These offer very fast writing and slightly faster reading than ``table`` stores. +retrieved in their entirety. They also do not support dataframes with non-unique column names. +The ``fixed`` format stores offer very fast writing and slightly faster reading than ``table`` stores. This format is specified by default when using ``put`` or ``to_hdf`` or by ``format='fixed'`` or ``format='f'`` .. warning:: diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index ca24eb3f910ed..9fbe718b3fc64 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -187,6 +187,7 @@ Bug Fixes - Bug in Series 0-division with a float and integer operand dtypes (:issue:`7785`) - Bug in ``Series.astype("unicode")`` not calling ``unicode`` on the values correctly (:issue:`7758`) - Bug in ``DataFrame.as_matrix()`` with mixed ``datetime64[ns]`` and ``timedelta64[ns]`` dtypes (:issue:`7778`) +- Raise a ``ValueError`` in ``df.to_hdf`` if ``df`` has non-unique columns as the resulting file will be broken (:issue:`7761`) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 0e6c41a25bbe5..cecbb407d0bd1 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2680,6 +2680,9 @@ def write(self, obj, **kwargs): self.attrs.ndim = data.ndim for i, ax in enumerate(data.axes): + if i == 0: + if not ax.is_unique: + raise ValueError("Columns index has to be unique for fixed format") self.write_index('axis%d' % i, ax) # Supporting mixed-type DataFrame objects...nontrivial diff --git 
a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 6a944284035c8..c602e8ff1a888 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -4370,6 +4370,17 @@ def test_categorical(self): # FIXME: TypeError: cannot pass a where specification when reading from a Fixed format store. this store must be selected in its entirety #result = store.select('df', where = ['index>2']) #tm.assert_frame_equal(df[df.index>2],result) + + def test_duplicate_column_name(self): + df = DataFrame(columns=["a", "a"], data=[[0, 0]]) + + with ensure_clean_path(self.path) as path: + self.assertRaises(ValueError, df.to_hdf, path, 'df', format='fixed') + + df.to_hdf(path, 'df', format='table') + other = read_hdf(path, 'df') + tm.assert_frame_equal(df, other) + def _test_sort(obj): if isinstance(obj, DataFrame):
Fixes #7761.
https://api.github.com/repos/pandas-dev/pandas/pulls/7788
2014-07-18T15:11:18Z
2014-07-21T11:39:29Z
2014-07-21T11:39:29Z
2014-08-18T18:40:23Z
BUG: Bug in Series 0-division with a float and integer operand dtypes (GH7785)
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index eb58f46f0f3fe..ca24eb3f910ed 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -184,7 +184,7 @@ There are no experimental changes in 0.15.0 Bug Fixes ~~~~~~~~~ - +- Bug in Series 0-division with a float and integer operand dtypes (:issue:`7785`) - Bug in ``Series.astype("unicode")`` not calling ``unicode`` on the values correctly (:issue:`7758`) - Bug in ``DataFrame.as_matrix()`` with mixed ``datetime64[ns]`` and ``timedelta64[ns]`` dtypes (:issue:`7778`) diff --git a/pandas/core/common.py b/pandas/core/common.py index 1a57c9c33ba7c..04c5140d6a59b 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1328,7 +1328,7 @@ def _fill_zeros(result, x, y, name, fill): # correctly # GH 6178 if np.isinf(fill): - np.putmask(result,signs<0 & mask, -fill) + np.putmask(result,(signs<0) & mask, -fill) result = result.reshape(shape) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index fda0abe07050d..e56da6c6522a5 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2362,6 +2362,17 @@ def test_div(self): expected = Series([np.nan,np.inf,-np.inf]) assert_series_equal(result, expected) + # float/integer issue + # GH 7785 + p = DataFrame({'first': (1,0), 'second': (-0.01,-0.02)}) + expected = Series([-0.01,-np.inf]) + + result = p['second'].div(p['first']) + assert_series_equal(result, expected) + + result = p['second'] / p['first'] + assert_series_equal(result, expected) + def test_operators(self): def _check_op(series, other, op, pos_only=False): @@ -4865,12 +4876,12 @@ def test_astype_unicode(self): test_series = [ Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]), Series([u"データーサイエンス、お前はもう死んでいる"]), - + ] - + former_encoding = None if not compat.PY3: - # in python we can force the default encoding + # in python we can force the default encoding # for this test former_encoding = sys.getdefaultencoding() reload(sys)
closes #7785
https://api.github.com/repos/pandas-dev/pandas/pulls/7786
2014-07-18T14:01:14Z
2014-07-18T14:32:28Z
2014-07-18T14:32:28Z
2014-07-18T14:32:29Z
Docs: Categorical docs fixups.
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index 87b59dc735969..c758dde16837b 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -104,7 +104,7 @@ By using some special functions: .. note:: - I contrast to R's `factor` function, there is currently no way to assign/change labels at + In contrast to R's `factor` function, there is currently no way to assign/change labels at creation time. Use `levels` to change the levels after creation time. To get back to the original Series or `numpy` array, use ``Series.astype(original_dtype)`` or @@ -397,7 +397,7 @@ that only values already in the levels can be assigned. Getting ~~~~~~~ -If the slicing operation returns either a `DataFrame` or a a column of type `Series`, +If the slicing operation returns either a `DataFrame` or a column of type `Series`, the ``category`` dtype is preserved. .. ipython:: python @@ -509,7 +509,7 @@ The same applies to ``df.append(df)``. Getting Data In/Out ------------------- -Writing data (`Series`, `Frames`) to a HDF store and reading it in entirety works. Querying the hdf +Writing data (`Series`, `Frames`) to a HDF store and reading it in entirety works. Querying the HDF store does not yet work. .. ipython:: python @@ -539,8 +539,8 @@ store does not yet work. pass -Writing to a csv file will convert the data, effectively removing any information about the -`Categorical` (levels and ordering). So if you read back the csv file you have to convert the +Writing to a CSV file will convert the data, effectively removing any information about the +`Categorical` (levels and ordering). So if you read back the CSV file you have to convert the relevant columns back to `category` and assign the right levels and level ordering. .. ipython:: python @@ -756,4 +756,4 @@ Future compatibility ~~~~~~~~~~~~~~~~~~~~ As `Categorical` is not a native `numpy` dtype, the implementation details of -`Series.cat` can change if such a `numpy` dtype is implemented. 
\ No newline at end of file +`Series.cat` can change if such a `numpy` dtype is implemented.
https://api.github.com/repos/pandas-dev/pandas/pulls/7783
2014-07-18T11:51:53Z
2014-07-18T12:02:30Z
2014-07-18T12:02:30Z
2014-07-18T12:02:33Z
BUG: Prevent config paths to contain python keywords
diff --git a/pandas/core/config.py b/pandas/core/config.py index 3e8d76500d128..60dc1d7d0341e 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -445,7 +445,7 @@ def register_option(key, defval, doc='', validator=None, cb=None): for k in path: if not bool(re.match('^' + tokenize.Name + '$', k)): raise ValueError("%s is not a valid identifier" % k) - if keyword.iskeyword(key): + if keyword.iskeyword(k): raise ValueError("%s is a python keyword" % k) cursor = _global_config diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py index e60c9d5bd0fdf..dc5e9a67bdb65 100644 --- a/pandas/tests/test_config.py +++ b/pandas/tests/test_config.py @@ -59,6 +59,7 @@ def test_register_option(self): # no python keywords self.assertRaises(ValueError, self.cf.register_option, 'for', 0) + self.assertRaises(ValueError, self.cf.register_option, 'a.for.b', 0) # must be valid identifier (ensure attribute access works) self.assertRaises(ValueError, self.cf.register_option, 'Oh my Goddess!', 0)
https://api.github.com/repos/pandas-dev/pandas/pulls/7781
2014-07-18T09:42:40Z
2014-09-19T12:22:30Z
2014-09-19T12:22:30Z
2014-09-19T16:11:44Z
API: support `c` and `colormap` args for DataFrame.plot with kind='scatter'
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index bfd484b363dd2..2871d2f628659 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -435,6 +435,8 @@ Enhancements - Added ``layout`` keyword to ``DataFrame.plot`` (:issue:`6667`) - Allow to pass multiple axes to ``DataFrame.plot``, ``hist`` and ``boxplot`` (:issue:`5353`, :issue:`6970`, :issue:`7069`) +- Added support for ``c``, ``colormap`` and ``colorbar`` arguments for + ``DataFrame.plot`` with ``kind='scatter'`` (:issue:`7780`) - ``PeriodIndex`` supports ``resolution`` as the same as ``DatetimeIndex`` (:issue:`7708`) diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 1cce55cd53e11..d845ae38f05c2 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -521,6 +521,14 @@ It is recommended to specify ``color`` and ``label`` keywords to distinguish eac df.plot(kind='scatter', x='c', y='d', color='DarkGreen', label='Group 2', ax=ax); +The keyword ``c`` may be given as the name of a column to provide colors for +each point: + +.. ipython:: python + + @savefig scatter_plot_colored.png + df.plot(kind='scatter', x='a', y='b', c='c', s=50); + You can pass other keywords supported by matplotlib ``scatter``. Below example shows a bubble chart using a dataframe column values as bubble size. 
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 131edf499ff18..3211998b42300 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -1497,6 +1497,34 @@ def test_plot_scatter(self): axes = df.plot(x='x', y='y', kind='scatter', subplots=True) self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) + @slow + def test_plot_scatter_with_c(self): + df = DataFrame(randn(6, 4), + index=list(string.ascii_letters[:6]), + columns=['x', 'y', 'z', 'four']) + + axes = [df.plot(kind='scatter', x='x', y='y', c='z'), + df.plot(kind='scatter', x=0, y=1, c=2)] + for ax in axes: + # default to RdBu + self.assertEqual(ax.collections[0].cmap.name, 'RdBu') + # n.b. there appears to be no public method to get the colorbar + # label + self.assertEqual(ax.collections[0].colorbar._label, 'z') + + cm = 'cubehelix' + ax = df.plot(kind='scatter', x='x', y='y', c='z', colormap=cm) + self.assertEqual(ax.collections[0].cmap.name, cm) + + # verify turning off colorbar works + ax = df.plot(kind='scatter', x='x', y='y', c='z', colorbar=False) + self.assertIs(ax.collections[0].colorbar, None) + + # verify that we can still plot a solid color + ax = df.plot(x=0, y=1, c='red', kind='scatter') + self.assertIs(ax.collections[0].colorbar, None) + self._check_colors(ax.collections, facecolors=['r']) + @slow def test_plot_bar(self): df = DataFrame(randn(6, 4), diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 56316ac726c8a..7a68da3ad14f2 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1368,32 +1368,55 @@ def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True): class ScatterPlot(MPLPlot): _layout_type = 'single' - def __init__(self, data, x, y, **kwargs): + def __init__(self, data, x, y, c=None, **kwargs): MPLPlot.__init__(self, data, **kwargs) - self.kwds.setdefault('c', self.plt.rcParams['patch.facecolor']) if x is None or y is None: raise ValueError( 'scatter requires and x and y 
column') if com.is_integer(x) and not self.data.columns.holds_integer(): x = self.data.columns[x] if com.is_integer(y) and not self.data.columns.holds_integer(): y = self.data.columns[y] + if com.is_integer(c) and not self.data.columns.holds_integer(): + c = self.data.columns[c] self.x = x self.y = y + self.c = c @property def nseries(self): return 1 def _make_plot(self): - x, y, data = self.x, self.y, self.data + import matplotlib.pyplot as plt + + x, y, c, data = self.x, self.y, self.c, self.data ax = self.axes[0] + # plot a colorbar only if a colormap is provided or necessary + cb = self.kwds.pop('colorbar', self.colormap or c in self.data.columns) + + # pandas uses colormap, matplotlib uses cmap. + cmap = self.colormap or 'RdBu' + cmap = plt.cm.get_cmap(cmap) + + if c is None: + c_values = self.plt.rcParams['patch.facecolor'] + elif c in self.data.columns: + c_values = self.data[c].values + else: + c_values = c + if self.legend and hasattr(self, 'label'): label = self.label else: label = None - scatter = ax.scatter(data[x].values, data[y].values, label=label, - **self.kwds) + scatter = ax.scatter(data[x].values, data[y].values, c=c_values, + label=label, cmap=cmap, **self.kwds) + if cb: + img = ax.collections[0] + cb_label = c if c in self.data.columns else '' + self.fig.colorbar(img, ax=ax, label=cb_label) + self._add_legend_handle(scatter, label) errors_x = self._get_errorbars(label=x, index=0, yerr=False) @@ -2259,6 +2282,8 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. + colorbar : boolean, optional + If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots) position : float Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). 
Default is 0.5 (center) @@ -2285,6 +2310,9 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, `C` specifies the value at each `(x, y)` point and `reduce_C_function` is a function of one argument that reduces all the values in a bin to a single number (e.g. `mean`, `max`, `sum`, `std`). + + If `kind`='scatter' and the argument `c` is the name of a dataframe column, + the values of that column are used to color each point. """ kind = _get_standard_kind(kind.lower().strip())
`matplotlib.pyplot.scatter` supports the argument `c` for setting the color of each point. This patch lets you easily set it by giving a column name (currently you need to supply an ndarray to make it work, since pandas isn't aware of it): ``` df.plot('x', 'y', c='z', kind='scatter') ``` vs ``` df.plot('x', 'y', c=df['z'].values, kind='scatter') ``` While I was at it, I noticed that `kind='scatter'` did not support the `colormap` argument that some of the other methods support (notably `kind='hexbin'`). So I added it, too. This change should be almost entirely backwards compatible, unless folks are naming columns in their data frame valid matplotlib colors and using the same color name for the `c` argument. A colorbar will also be added automatically if relevant.
https://api.github.com/repos/pandas-dev/pandas/pulls/7780
2014-07-18T05:52:42Z
2014-09-11T19:48:42Z
2014-09-11T19:48:42Z
2014-09-17T22:27:52Z
BUG: unwanted conversions of timedelta dtypes when in a mixed datetimelike frame (GH7778)
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 128ddbd4a9ec3..eb58f46f0f3fe 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -186,7 +186,7 @@ Bug Fixes ~~~~~~~~~ - Bug in ``Series.astype("unicode")`` not calling ``unicode`` on the values correctly (:issue:`7758`) - +- Bug in ``DataFrame.as_matrix()`` with mixed ``datetime64[ns]`` and ``timedelta64[ns]`` dtypes (:issue:`7778`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a461dd0e247f2..17bef8dd28cf4 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3539,6 +3539,7 @@ def _apply_standard(self, func, axis, ignore_failures=False, reduce=True): except Exception: pass + dtype = object if self._is_mixed_type else None if axis == 0: series_gen = (self.icol(i) for i in range(len(self.columns))) res_index = self.columns @@ -3547,7 +3548,7 @@ def _apply_standard(self, func, axis, ignore_failures=False, reduce=True): res_index = self.index res_columns = self.columns values = self.values - series_gen = (Series.from_array(arr, index=res_columns, name=name) + series_gen = (Series.from_array(arr, index=res_columns, name=name, dtype=dtype) for i, (arr, name) in enumerate(zip(values, res_index))) else: # pragma : no cover diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 2bd318ec2430f..f649baeb16278 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -25,7 +25,7 @@ from pandas.util.decorators import cache_readonly from pandas.tslib import Timestamp -from pandas import compat +from pandas import compat, _np_version_under1p7 from pandas.compat import range, map, zip, u from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type @@ -1290,6 +1290,16 @@ def to_native_types(self, slicer=None, na_rep=None, **kwargs): return rvalues.tolist() + def get_values(self, dtype=None): + # return object dtypes as datetime.timedeltas + if dtype == object: + if _np_version_under1p7: + return self.values.astype('object') + return 
lib.map_infer(self.values.ravel(), + lambda x: timedelta(microseconds=x.item()/1000) + ).reshape(self.values.shape) + return self.values + class BoolBlock(NumericBlock): __slots__ = () is_bool = True @@ -2595,7 +2605,7 @@ def as_matrix(self, items=None): else: mgr = self - if self._is_single_block: + if self._is_single_block or not self.is_mixed_type: return mgr.blocks[0].get_values() else: return mgr._interleave() @@ -3647,9 +3657,11 @@ def _lcd_dtype(l): has_non_numeric = have_dt64 or have_td64 or have_cat if (have_object or - (have_bool and have_numeric) or + (have_bool and (have_numeric or have_dt64 or have_td64)) or (have_numeric and has_non_numeric) or - have_cat): + have_cat or + have_dt64 or + have_td64): return np.dtype(object) elif have_bool: return np.dtype(bool) @@ -3670,10 +3682,6 @@ def _lcd_dtype(l): return np.dtype('int%s' % (lcd.itemsize * 8 * 2)) return lcd - elif have_dt64 and not have_float and not have_complex: - return np.dtype('M8[ns]') - elif have_td64 and not have_float and not have_complex: - return np.dtype('m8[ns]') elif have_complex: return np.dtype('c16') else: diff --git a/pandas/core/series.py b/pandas/core/series.py index eff558d875c4a..9abc8f22009b3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -237,14 +237,14 @@ def __init__(self, data=None, index=None, dtype=None, name=None, self._set_axis(0, index, fastpath=True) @classmethod - def from_array(cls, arr, index=None, name=None, copy=False, + def from_array(cls, arr, index=None, name=None, dtype=None, copy=False, fastpath=False): # return a sparse series here if isinstance(arr, ABCSparseArray): from pandas.sparse.series import SparseSeries cls = SparseSeries - return cls(arr, index=index, name=name, copy=copy, fastpath=fastpath) + return cls(arr, index=index, name=name, dtype=dtype, copy=copy, fastpath=fastpath) @property def _constructor(self): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 2e1bbc88e36ff..df00edc46eed2 100644 --- 
a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -9635,6 +9635,15 @@ def test_apply(self): [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c']) self.assertRaises(ValueError, df.apply, lambda x: x, 2) + def test_apply_mixed_datetimelike(self): + tm._skip_if_not_numpy17_friendly() + + # mixed datetimelike + # GH 7778 + df = DataFrame({ 'A' : date_range('20130101',periods=3), 'B' : pd.to_timedelta(np.arange(3),unit='s') }) + result = df.apply(lambda x: x, axis=1) + assert_frame_equal(result, df) + def test_apply_empty(self): # empty applied = self.empty.apply(np.sqrt) diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index 8a9010084fd99..36dbced6eda8c 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -3,7 +3,7 @@ import nose import numpy as np -from pandas import Index, MultiIndex, DataFrame, Series +from pandas import Index, MultiIndex, DataFrame, Series, Categorical from pandas.compat import OrderedDict, lrange from pandas.sparse.array import SparseArray from pandas.core.internals import * @@ -41,9 +41,11 @@ def create_block(typestr, placement, item_shape=None, num_offset=0): * complex, c16, c8 * bool * object, string, O - * datetime, dt + * datetime, dt, M8[ns] + * timedelta, td, m8[ns] * sparse (SparseArray with fill_value=0.0) * sparse_na (SparseArray with fill_value=np.nan) + * category, category2 """ placement = BlockPlacement(placement) @@ -67,8 +69,14 @@ def create_block(typestr, placement, item_shape=None, num_offset=0): shape) elif typestr in ('bool'): values = np.ones(shape, dtype=np.bool_) - elif typestr in ('datetime', 'dt'): + elif typestr in ('datetime', 'dt', 'M8[ns]'): values = (mat * 1e9).astype('M8[ns]') + elif typestr in ('timedelta', 'td', 'm8[ns]'): + values = (mat * 1).astype('m8[ns]') + elif typestr in ('category'): + values = Categorical([1,1,2,2,3,3,3,3,4,4]) + elif typestr in ('category2'): + values = Categorical(['a','a','a','a','b','b','c','c','c','d']) elif 
typestr in ('sparse', 'sparse_na'): # FIXME: doesn't support num_rows != 10 assert shape[-1] == 10 @@ -556,7 +564,54 @@ def _compare(old_mgr, new_mgr): self.assertEqual(new_mgr.get('h').dtype, np.float16) def test_interleave(self): - pass + + + # self + for dtype in ['f8','i8','object','bool','complex','M8[ns]','m8[ns]']: + mgr = create_mgr('a: {0}'.format(dtype)) + self.assertEqual(mgr.as_matrix().dtype,dtype) + mgr = create_mgr('a: {0}; b: {0}'.format(dtype)) + self.assertEqual(mgr.as_matrix().dtype,dtype) + + # will be converted according the actual dtype of the underlying + mgr = create_mgr('a: category') + self.assertEqual(mgr.as_matrix().dtype,'i8') + mgr = create_mgr('a: category; b: category') + self.assertEqual(mgr.as_matrix().dtype,'i8'), + mgr = create_mgr('a: category; b: category2') + self.assertEqual(mgr.as_matrix().dtype,'object') + mgr = create_mgr('a: category2') + self.assertEqual(mgr.as_matrix().dtype,'object') + mgr = create_mgr('a: category2; b: category2') + self.assertEqual(mgr.as_matrix().dtype,'object') + + # combinations + mgr = create_mgr('a: f8') + self.assertEqual(mgr.as_matrix().dtype,'f8') + mgr = create_mgr('a: f8; b: i8') + self.assertEqual(mgr.as_matrix().dtype,'f8') + mgr = create_mgr('a: f4; b: i8') + self.assertEqual(mgr.as_matrix().dtype,'f4') + mgr = create_mgr('a: f4; b: i8; d: object') + self.assertEqual(mgr.as_matrix().dtype,'object') + mgr = create_mgr('a: bool; b: i8') + self.assertEqual(mgr.as_matrix().dtype,'object') + mgr = create_mgr('a: complex') + self.assertEqual(mgr.as_matrix().dtype,'complex') + mgr = create_mgr('a: f8; b: category') + self.assertEqual(mgr.as_matrix().dtype,'object') + mgr = create_mgr('a: M8[ns]; b: category') + self.assertEqual(mgr.as_matrix().dtype,'object') + mgr = create_mgr('a: M8[ns]; b: bool') + self.assertEqual(mgr.as_matrix().dtype,'object') + mgr = create_mgr('a: M8[ns]; b: i8') + self.assertEqual(mgr.as_matrix().dtype,'object') + mgr = create_mgr('a: m8[ns]; b: bool') + 
self.assertEqual(mgr.as_matrix().dtype,'object') + mgr = create_mgr('a: m8[ns]; b: i8') + self.assertEqual(mgr.as_matrix().dtype,'object') + mgr = create_mgr('a: M8[ns]; b: m8[ns]') + self.assertEqual(mgr.as_matrix().dtype,'object') def test_interleave_non_unique_cols(self): df = DataFrame([
closes #7778 TST: tests for internals/as_matrix() for all dtypes (including categoricals)
https://api.github.com/repos/pandas-dev/pandas/pulls/7779
2014-07-17T23:34:17Z
2014-07-18T01:00:30Z
2014-07-18T01:00:30Z
2014-07-18T01:00:30Z
BUG: Fix for passing multiple ints as levels in DataFrame.stack() (#7660)
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index db68c0eb224e2..ab9018da4c41a 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -160,10 +160,34 @@ the level numbers: stacked.unstack('second') +.. _reshaping.stack_multiple: + You may also stack or unstack more than one level at a time by passing a list of levels, in which case the end result is as if each level in the list were processed individually. +.. ipython:: python + + columns = MultiIndex.from_tuples([ + ('A', 'cat', 'long'), ('B', 'cat', 'long'), + ('A', 'dog', 'short'), ('B', 'dog', 'short') + ], + names=['exp', 'animal', 'hair_length'] + ) + df = DataFrame(randn(4, 4), columns=columns) + df + + df.stack(level=['animal', 'hair_length']) + +The list of levels can contain either level names or level numbers (but +not a mixture of the two). + +.. ipython:: python + + # df.stack(level=['animal', 'hair_length']) + # from above is equivalent to: + df.stack(level=[1, 2]) + These functions are intelligent about handling missing data and do not expect each subgroup within the hierarchical index to have the same set of labels. They also can handle the index being unsorted (but you can make it sorted by diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 5e3f97944c243..aa57004a70e29 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -30,6 +30,11 @@ users upgrade to this version. API changes ~~~~~~~~~~~ +- Passing multiple levels to `DataFrame.stack()` will now work when multiple level + numbers are passed (:issue:`7660`), and will raise a ``ValueError`` when the + levels aren't all level names or all level numbers. See + :ref:`Reshaping by stacking and unstacking <reshaping.stack_multiple>`. + .. 
_whatsnew_0150.cat: Categoricals in Series/DataFrame diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4f558dda756dd..04fe9e8d35359 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3311,13 +3311,10 @@ def stack(self, level=-1, dropna=True): ------- stacked : DataFrame or Series """ - from pandas.core.reshape import stack + from pandas.core.reshape import stack, stack_multiple if isinstance(level, (tuple, list)): - result = self - for lev in level: - result = stack(result, lev, dropna=dropna) - return result + return stack_multiple(self, level, dropna=dropna) else: return stack(self, level, dropna=dropna) diff --git a/pandas/core/index.py b/pandas/core/index.py index 6927d5a732440..81602d5240a08 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2490,6 +2490,12 @@ def _get_level_number(self, level): raise KeyError('Level %s not found' % str(level)) elif level < 0: level += self.nlevels + if level < 0: + orig_level = level - self.nlevels + raise IndexError( + 'Too many levels: Index has only %d levels, ' + '%d is not a valid level number' % (self.nlevels, orig_level) + ) # Note: levels are zero-based elif level >= self.nlevels: raise IndexError('Too many levels: Index has only %d levels, ' diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 43784e15ab163..b014ede6e65a8 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -513,9 +513,7 @@ def stack(frame, level=-1, dropna=True): "names are not unique.".format(level)) raise ValueError(msg) - if isinstance(level, int) and level < 0: - level += frame.columns.nlevels - + # Will also convert negative level numbers and check if out of bounds. 
level = frame.columns._get_level_number(level) if isinstance(frame.columns, MultiIndex): @@ -547,6 +545,45 @@ def stack(frame, level=-1, dropna=True): return Series(new_values, index=new_index) +def stack_multiple(frame, level, dropna=True): + # If all passed levels match up to column names, no + # ambiguity about what to do + if all(lev in frame.columns.names for lev in level): + result = frame + for lev in level: + result = stack(result, lev, dropna=dropna) + + # Otherwise, level numbers may change as each successive level is stacked + elif all(isinstance(lev, int) for lev in level): + # As each stack is done, the level numbers decrease, so we need + # to account for that when level is a sequence of ints + result = frame + # _get_level_number() checks level numbers are in range and converts + # negative numbers to positive + level = [frame.columns._get_level_number(lev) for lev in level] + + # Can't iterate directly through level as we might need to change + # values as we go + for index in range(len(level)): + lev = level[index] + result = stack(result, lev, dropna=dropna) + # Decrement all level numbers greater than current, as these + # have now shifted down by one + updated_level = [] + for other in level: + if other > lev: + updated_level.append(other - 1) + else: + updated_level.append(other) + level = updated_level + + else: + raise ValueError("level should contain all level names or all level numbers, " + "not a mixture of the two.") + + return result + + def _stack_multi_columns(frame, level=-1, dropna=True): this = frame.copy() diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index df00edc46eed2..c4783bc49f0ce 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -11725,6 +11725,29 @@ def test_stack_unstack(self): assert_frame_equal(unstacked_cols.T, self.frame) assert_frame_equal(unstacked_cols_df['bar'].T, self.frame) + def test_stack_ints(self): + df = DataFrame( + np.random.randn(30, 27), + 
columns=MultiIndex.from_tuples( + list(itertools.product(range(3), repeat=3)) + ) + ) + assert_frame_equal( + df.stack(level=[1, 2]), + df.stack(level=1).stack(level=1) + ) + assert_frame_equal( + df.stack(level=[-2, -1]), + df.stack(level=1).stack(level=1) + ) + + df_named = df.copy() + df_named.columns.set_names(range(3), inplace=True) + assert_frame_equal( + df_named.stack(level=[1, 2]), + df_named.stack(level=1).stack(level=1) + ) + def test_unstack_bool(self): df = DataFrame([False, False], index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]), diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index d8e17c4d1d290..5c0e500b243c9 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -834,6 +834,12 @@ def test_count_level_corner(self): columns=df.columns).fillna(0).astype(np.int64) assert_frame_equal(result, expected) + def test_get_level_number_out_of_bounds(self): + with assertRaisesRegexp(IndexError, "Too many levels"): + self.frame.index._get_level_number(2) + with assertRaisesRegexp(IndexError, "not a valid level number"): + self.frame.index._get_level_number(-3) + def test_unstack(self): # just check that it works for now unstacked = self.ymd.unstack() @@ -1005,6 +1011,22 @@ def test_stack_unstack_multiple(self): expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all') assert_frame_equal(unstacked, expected.ix[:, unstacked.columns]) + def test_stack_names_and_numbers(self): + unstacked = self.ymd.unstack(['year', 'month']) + + # Can't use mixture of names and numbers to stack + with assertRaisesRegexp(ValueError, "level should contain"): + unstacked.stack([0, 'month']) + + def test_stack_multiple_out_of_bounds(self): + # nlevels == 3 + unstacked = self.ymd.unstack(['year', 'month']) + + with assertRaisesRegexp(IndexError, "Too many levels"): + unstacked.stack([2, 3]) + with assertRaisesRegexp(IndexError, "not a valid level number"): + unstacked.stack([-4, -3]) + def 
test_unstack_period_series(self): # GH 4342 idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
closes #7660 The specific case that came up in #7660 (and originally in #7653) seems easy enough to fix, so I've covered that in the PR. I feel like this raises a few other potential problems though, e.g.: - If the list of level numbers is not in order, is there any sensible way to deal with them? I've sorted the level list in my fix, as that seems like the most straightforward way of making sure that when you do each stack, it's only the level numbers higher than the current that are affected. This might produce undesired results though, so maybe we should just raise a `ValueError` if the level numbers aren't sorted? - I'm not sure how to extend this to deal with negative level numbers.
https://api.github.com/repos/pandas-dev/pandas/pulls/7770
2014-07-16T23:51:25Z
2014-07-21T11:42:51Z
2014-07-21T11:42:51Z
2014-07-21T11:43:11Z
FIX: Documentation for for 0.14.1 change log
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index b3be2936c20b5..9e19161847327 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -239,8 +239,8 @@ Bug Fixes - Bug where ``nanops._has_infs`` doesn't work with many dtypes (:issue:`7357`) - Bug in ``StataReader.data`` where reading a 0-observation dta failed (:issue:`7369`) -- Bug in when reading Stata 13 (117) files containing fixed width strings (:issue:`7360`) -- Bug in when writing Stata files where the encoding was ignored (:issue:`7286`) +- Bug in ``StataReader`` when reading Stata 13 (117) files containing fixed width strings (:issue:`7360`) +- Bug in ``StataWriter`` where encoding was ignored (:issue:`7286`) - Bug in ``DatetimeIndex`` comparison doesn't handle ``NaT`` properly (:issue:`7529`) - Bug in passing input with ``tzinfo`` to some offsets ``apply``, ``rollforward`` or ``rollback`` resets ``tzinfo`` or raises ``ValueError`` (:issue:`7465`) - Bug in ``DatetimeIndex.to_period``, ``PeriodIndex.asobject``, ``PeriodIndex.to_timestamp`` doesn't preserve ``name`` (:issue:`7485`)
Change log is missing some key words on changes to StataReader and StataWriter.
https://api.github.com/repos/pandas-dev/pandas/pulls/7769
2014-07-16T19:03:36Z
2014-07-28T20:41:48Z
2014-07-28T20:41:48Z
2014-08-20T15:32:51Z
Docs: MultiIndex support is hardly bleeding edge, remove docs warnings.
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 9c73c679f726a..ed5bfd0ba4804 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -1638,15 +1638,6 @@ analysis. See the :ref:`cookbook<cookbook.multi_index>` for some advanced strategies -.. note:: - - Given that hierarchical indexing is so new to the library, it is definitely - "bleeding-edge" functionality but is certainly suitable for production. But, - there may inevitably be some minor API changes as more use cases are - explored and any weaknesses in the design / implementation are identified. - pandas aims to be "eminently usable" so any feedback about new - functionality like this is extremely helpful. - Creating a MultiIndex (hierarchical index) object ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
https://api.github.com/repos/pandas-dev/pandas/pulls/7767
2014-07-16T07:25:44Z
2014-07-16T09:59:30Z
2014-07-16T09:59:30Z
2014-07-16T09:59:39Z
docs: rewrite .iloc accessing beyond ends.
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 9c73c679f726a..8f4cb1e1e6a68 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -415,20 +415,29 @@ For getting a cross section using an integer position (equiv to ``df.xs(1)``) df1.iloc[1] -There is one significant departure from standard python/numpy slicing semantics. -python/numpy allow slicing past the end of an array without an associated error. +Out of range slice indexes are handled gracefully just as in Python/Numpy. .. ipython:: python # these are allowed in python/numpy. + # Only works in Pandas starting from v0.14.0. x = list('abcdef') + x x[4:10] x[8:10] + s = Series(x) + s + s.iloc[4:10] + s.iloc[8:10] -- as of v0.14.0, ``iloc`` will now accept out-of-bounds indexers for slices, e.g. a value that exceeds the length of the object being - indexed. These will be excluded. This will make pandas conform more with pandas/numpy indexing of out-of-bounds - values. A single indexer / list of indexers that is out-of-bounds will still raise - ``IndexError`` (:issue:`6296`, :issue:`6299`). This could result in an empty axis (e.g. an empty DataFrame being returned) +.. note:: + + Prior to v0.14.0, ``iloc`` would not accept out of bounds indexers for + slices, e.g. a value that exceeds the length of the object being indexed. + + +Note that this could result in an empty axis (e.g. an empty DataFrame being +returned) .. ipython:: python @@ -438,7 +447,9 @@ python/numpy allow slicing past the end of an array without an associated error. dfl.iloc[:,1:3] dfl.iloc[4:6] -These are out-of-bounds selections +A single indexer that is out of bounds will raise an ``IndexError``. +A list of indexers where any element is out of bounds will raise an +``IndexError`` .. code-block:: python
Let's actually talk about what the current behaviour is, not what the behaviour used to be.
https://api.github.com/repos/pandas-dev/pandas/pulls/7756
2014-07-15T02:31:45Z
2014-07-15T12:57:21Z
2014-07-15T12:57:21Z
2014-07-15T12:57:21Z
DOC: Fix typo conncection -> connection
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 23ca80d771df9..1ee5c55c0ae06 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -264,7 +264,7 @@ def read_sql_table(table_name, con, index_col=None, coerce_float=True, table_name : string Name of SQL table in database con : SQLAlchemy engine - Sqlite DBAPI conncection mode not supported + Sqlite DBAPI connection mode not supported index_col : string, optional Column to set as index coerce_float : boolean, default True
null
https://api.github.com/repos/pandas-dev/pandas/pulls/7747
2014-07-14T04:53:52Z
2014-07-14T07:39:19Z
2014-07-14T07:39:19Z
2014-07-14T07:39:24Z
Docs fixes
diff --git a/doc/README.rst b/doc/README.rst index 1a105a7a65a81..660a3b7232891 100644 --- a/doc/README.rst +++ b/doc/README.rst @@ -33,8 +33,8 @@ Some other important things to know about the docs: itself and the docs in this folder ``pandas/doc/``. The docstrings provide a clear explanation of the usage of the individual - functions, while the documentation in this filder consists of tutorial-like - overviews per topic together with some other information (whatsnew, + functions, while the documentation in this folder consists of tutorial-like + overviews per topic together with some other information (what's new, installation, etc). - The docstrings follow the **Numpy Docstring Standard** which is used widely @@ -56,7 +56,7 @@ Some other important things to know about the docs: x = 2 x**3 - will be renderd as + will be rendered as :: @@ -66,7 +66,7 @@ Some other important things to know about the docs: Out[2]: 8 This means that almost all code examples in the docs are always run (and the - ouptut saved) during the doc build. This way, they will always be up to date, + output saved) during the doc build. This way, they will always be up to date, but it makes the doc building a bit more complex. @@ -135,12 +135,12 @@ If you want to do a full clean build, do:: Staring with 0.13.1 you can tell ``make.py`` to compile only a single section of the docs, greatly reducing the turn-around time for checking your changes. -You will be prompted to delete unrequired `.rst` files, since the last commited -version can always be restored from git. +You will be prompted to delete `.rst` files that aren't required, since the +last committed version can always be restored from git. 
:: - #omit autosummary and api section + #omit autosummary and API section python make.py clean python make.py --no-api diff --git a/doc/source/10min.rst b/doc/source/10min.rst index a9a97ee56813c..2111bb2d72dcb 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -260,7 +260,7 @@ For slicing columns explicitly df.iloc[:,1:3] -For getting a value explicity +For getting a value explicitly .. ipython:: python diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 4d67616c5cd60..a503367c13427 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -346,7 +346,7 @@ General DataFrame Combine The ``combine_first`` method above calls the more general DataFrame method ``combine``. This method takes another DataFrame and a combiner function, aligns the input DataFrame and then passes the combiner function pairs of -Series (ie, columns whose names are the same). +Series (i.e., columns whose names are the same). So, for instance, to reproduce ``combine_first`` as above: @@ -1461,7 +1461,7 @@ from the current type (say ``int`` to ``float``) df3.dtypes The ``values`` attribute on a DataFrame return the *lower-common-denominator* of the dtypes, meaning -the dtype that can accommodate **ALL** of the types in the resulting homogenous dtyped numpy array. This can +the dtype that can accommodate **ALL** of the types in the resulting homogeneous dtyped numpy array. This can force some *upcasting*. .. ipython:: python diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 844112312cdce..fd68427a86951 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -499,7 +499,7 @@ The :ref:`HDFStores <io.hdf5>` docs `Merging on-disk tables with millions of rows <http://stackoverflow.com/questions/14614512/merging-two-tables-with-millions-of-rows-in-python/14617925#14617925>`__ -Deduplicating a large store by chunks, essentially a recursive reduction operation. 
Shows a function for taking in data from +De-duplicating a large store by chunks, essentially a recursive reduction operation. Shows a function for taking in data from csv file and creating a store by chunks, with date parsing as well. `See here <http://stackoverflow.com/questions/16110252/need-to-compare-very-large-files-around-1-5gb-in-python/16110391#16110391>`__ diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index 7c43a03e68013..928de285982cf 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -118,7 +118,7 @@ provided. The value will be repeated to match the length of **index** Series is ndarray-like ~~~~~~~~~~~~~~~~~~~~~~ -``Series`` acts very similary to a ``ndarray``, and is a valid argument to most NumPy functions. +``Series`` acts very similarly to a ``ndarray``, and is a valid argument to most NumPy functions. However, things like slicing also slice the index. .. ipython :: python @@ -474,7 +474,7 @@ DataFrame: For a more exhaustive treatment of more sophisticated label-based indexing and slicing, see the :ref:`section on indexing <indexing>`. We will address the -fundamentals of reindexing / conforming to new sets of lables in the +fundamentals of reindexing / conforming to new sets of labels in the :ref:`section on reindexing <basics.reindexing>`. Data alignment and arithmetic @@ -892,7 +892,7 @@ Slicing ~~~~~~~ Slicing works in a similar manner to a Panel. ``[]`` slices the first dimension. -``.ix`` allows you to slice abitrarily and get back lower dimensional objects +``.ix`` allows you to slice arbitrarily and get back lower dimensional objects .. ipython:: python diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index 00c76632ce17b..e6b735173110b 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -553,7 +553,7 @@ standard Python. 
:func:`pandas.eval` Parsers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -There are two different parsers and and two different engines you can use as +There are two different parsers and two different engines you can use as the backend. The default ``'pandas'`` parser allows a more intuitive syntax for expressing diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 81bebab46dac9..a613d53218ce2 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -144,7 +144,7 @@ Frequency conversion Frequency conversion is implemented using the ``resample`` method on TimeSeries and DataFrame objects (multiple time series). ``resample`` also works on panels -(3D). Here is some code that resamples daily data to montly: +(3D). Here is some code that resamples daily data to monthly: .. ipython:: python diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index 0078ffb506cc9..438e2f79c5ff3 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -183,7 +183,7 @@ Why not make NumPy like R? ~~~~~~~~~~~~~~~~~~~~~~~~~~ Many people have suggested that NumPy should simply emulate the ``NA`` support -present in the more domain-specific statistical programming langauge `R +present in the more domain-specific statistical programming language `R <http://r-project.org>`__. Part of the reason is the NumPy type hierarchy: .. csv-table:: @@ -500,7 +500,7 @@ parse HTML tables in the top-level pandas io function ``read_html``. molasses. However consider the fact that many tables on the web are not big enough for the parsing algorithm runtime to matter. It is more likely that the bottleneck will be in the process of reading the raw - text from the url over the web, i.e., IO (input-output). For very large + text from the URL over the web, i.e., IO (input-output). For very large tables, this might not be true. 
**Issues with using** |Anaconda|_ diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 22f1414c4f2b0..eaccbfddc1f86 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -969,7 +969,7 @@ Regroup columns of a DataFrame according to their sum, and sum the aggregated on df.groupby(df.sum(), axis=1).sum() -Returning a Series to propogate names +Returning a Series to propagate names ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Group DataFrame columns, compute a set of metrics and return a named Series. diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 84736d4989f6f..9c73c679f726a 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -88,10 +88,10 @@ of multi-axis indexing. See more at :ref:`Selection by Position <indexing.integer>` - ``.ix`` supports mixed integer and label based access. It is primarily label - based, but will fallback to integer positional access. ``.ix`` is the most + based, but will fall back to integer positional access. ``.ix`` is the most general and will support any of the inputs to ``.loc`` and ``.iloc``, as well as support for floating point label schemes. ``.ix`` is especially useful - when dealing with mixed positional and label based hierarchial indexes. + when dealing with mixed positional and label based hierarchical indexes. As using integer slices with ``.ix`` have different behavior depending on whether the slice is interpreted as position based or label based, it's usually better to be explicit and use ``.iloc`` or ``.loc``. @@ -230,7 +230,7 @@ new column. - The ``Series/Panel`` accesses are available starting in 0.13.0. If you are using the IPython environment, you may also use tab-completion to -see these accessable attributes. +see these accessible attributes. 
Slicing ranges -------------- @@ -328,7 +328,7 @@ For getting values with a boolean array df1.loc['a']>0 df1.loc[:,df1.loc['a']>0] -For getting a value explicity (equiv to deprecated ``df.get_value('a','A')``) +For getting a value explicitly (equiv to deprecated ``df.get_value('a','A')``) .. ipython:: python @@ -415,7 +415,7 @@ For getting a cross section using an integer position (equiv to ``df.xs(1)``) df1.iloc[1] -There is one signficant departure from standard python/numpy slicing semantics. +There is one significant departure from standard python/numpy slicing semantics. python/numpy allow slicing past the end of an array without an associated error. .. ipython:: python @@ -494,7 +494,7 @@ out what you're asking for. If you only want to access a scalar value, the fastest way is to use the ``at`` and ``iat`` methods, which are implemented on all of the data structures. -Similary to ``loc``, ``at`` provides **label** based scalar lookups, while, ``iat`` provides **integer** based lookups analagously to ``iloc`` +Similarly to ``loc``, ``at`` provides **label** based scalar lookups, while, ``iat`` provides **integer** based lookups analogously to ``iloc`` .. ipython:: python @@ -643,7 +643,7 @@ To return a Series of the same shape as the original s.where(s > 0) -Selecting values from a DataFrame with a boolean critierion now also preserves +Selecting values from a DataFrame with a boolean criterion now also preserves input data shape. ``where`` is used under the hood as the implementation. Equivalent is ``df.where(df < 0)`` @@ -690,7 +690,7 @@ without creating a copy: **alignment** Furthermore, ``where`` aligns the input boolean condition (ndarray or DataFrame), -such that partial selection with setting is possible. This is analagous to +such that partial selection with setting is possible. This is analogous to partial setting via ``.ix`` (but on the contents rather than the axis labels) .. 
ipython:: python @@ -756,7 +756,7 @@ between the values of columns ``a`` and ``c``. For example: # query df.query('(a < b) & (b < c)') -Do the same thing but fallback on a named index if there is no column +Do the same thing but fall back on a named index if there is no column with the name ``a``. .. ipython:: python @@ -899,7 +899,7 @@ The ``in`` and ``not in`` operators ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :meth:`~pandas.DataFrame.query` also supports special use of Python's ``in`` and -``not in`` comparison operators, providing a succint syntax for calling the +``not in`` comparison operators, providing a succinct syntax for calling the ``isin`` method of a ``Series`` or ``DataFrame``. .. ipython:: python @@ -1416,7 +1416,7 @@ faster, and allows one to index *both* axes if so desired. Why does the assignment when using chained indexing fail! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -So, why does this show the ``SettingWithCopy`` warning / and possibly not work when you do chained indexing and assignement: +So, why does this show the ``SettingWithCopy`` warning / and possibly not work when you do chained indexing and assignment: .. code-block:: python @@ -2149,7 +2149,7 @@ metadata, like the index ``name`` (or, for ``MultiIndex``, ``levels`` and You can use the ``rename``, ``set_names``, ``set_levels``, and ``set_labels`` to set these attributes directly. They default to returning a copy; however, -you can specify ``inplace=True`` to have the data change inplace. +you can specify ``inplace=True`` to have the data change in place. .. ipython:: python diff --git a/doc/source/io.rst b/doc/source/io.rst index cfa97ca0f3fef..fa6ab646a47c8 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -29,7 +29,7 @@ IO Tools (Text, CSV, HDF5, ...) 
******************************* -The pandas I/O api is a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas`` +The pandas I/O API is a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas`` object. * :ref:`read_csv<io.read_csv_table>` @@ -78,8 +78,8 @@ for some advanced strategies They can take a number of arguments: - - ``filepath_or_buffer``: Either a string path to a file, url - (including http, ftp, and s3 locations), or any object with a ``read`` + - ``filepath_or_buffer``: Either a string path to a file, URL + (including http, ftp, and S3 locations), or any object with a ``read`` method (such as an open file or ``StringIO``). - ``sep`` or ``delimiter``: A delimiter / separator to split fields on. `read_csv` is capable of inferring the delimiter automatically in some @@ -511,7 +511,7 @@ data columns: Date Parsing Functions ~~~~~~~~~~~~~~~~~~~~~~ Finally, the parser allows you can specify a custom ``date_parser`` function to -take full advantage of the flexiblity of the date parsing API: +take full advantage of the flexibility of the date parsing API: .. ipython:: python @@ -964,7 +964,7 @@ Reading columns with a ``MultiIndex`` By specifying list of row locations for the ``header`` argument, you can read in a ``MultiIndex`` for the columns. Specifying non-consecutive -rows will skip the interveaning rows. In order to have the pre-0.13 behavior +rows will skip the intervening rows. In order to have the pre-0.13 behavior of tupleizing columns, specify ``tupleize_cols=True``. .. ipython:: python @@ -1038,7 +1038,7 @@ rather than reading the entire file into memory, such as the following: table -By specifiying a ``chunksize`` to ``read_csv`` or ``read_table``, the return +By specifying a ``chunksize`` to ``read_csv`` or ``read_table``, the return value will be an iterable object of type ``TextFileReader``: .. 
ipython:: python @@ -1100,7 +1100,7 @@ function takes a number of arguments. Only the first is required. used. (A sequence should be given if the DataFrame uses MultiIndex). - ``mode`` : Python write mode, default 'w' - ``encoding``: a string representing the encoding to use if the contents are - non-ascii, for python versions prior to 3 + non-ASCII, for python versions prior to 3 - ``line_terminator``: Character sequence denoting line end (default '\\n') - ``quoting``: Set quoting rules as in csv module (default csv.QUOTE_MINIMAL) - ``quotechar``: Character used to quote fields (default '"') @@ -1184,7 +1184,7 @@ with optional parameters: - ``double_precision`` : The number of decimal places to use when encoding floating point values, default 10. - ``force_ascii`` : force encoded string to be ASCII, default True. - ``date_unit`` : The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us' or 'ns' for seconds, milliseconds, microseconds and nanoseconds respectively. Default 'ms'. -- ``default_handler`` : The handler to call if an object cannot otherwise be converted to a suitable format for JSON. Takes a single argument, which is the object to convert, and returns a serialisable object. +- ``default_handler`` : The handler to call if an object cannot otherwise be converted to a suitable format for JSON. Takes a single argument, which is the object to convert, and returns a serializable object. Note ``NaN``'s, ``NaT``'s and ``None`` will be converted to ``null`` and ``datetime`` objects will be converted based on the ``date_format`` and ``date_unit`` parameters. @@ -1208,7 +1208,7 @@ file / string. Consider the following DataFrame and Series: sjo = Series(dict(x=15, y=16, z=17), name='D') sjo -**Column oriented** (the default for ``DataFrame``) serialises the data as +**Column oriented** (the default for ``DataFrame``) serializes the data as nested JSON objects with column labels acting as the primary index: .. 
ipython:: python @@ -1224,7 +1224,7 @@ but the index labels are now primary: dfjo.to_json(orient="index") sjo.to_json(orient="index") -**Record oriented** serialises the data to a JSON array of column -> value records, +**Record oriented** serializes the data to a JSON array of column -> value records, index labels are not included. This is useful for passing DataFrame data to plotting libraries, for example the JavaScript library d3.js: @@ -1233,7 +1233,7 @@ libraries, for example the JavaScript library d3.js: dfjo.to_json(orient="records") sjo.to_json(orient="records") -**Value oriented** is a bare-bones option which serialises to nested JSON arrays of +**Value oriented** is a bare-bones option which serializes to nested JSON arrays of values only, column and index labels are not included: .. ipython:: python @@ -1241,7 +1241,7 @@ values only, column and index labels are not included: dfjo.to_json(orient="values") # Not available for Series -**Split oriented** serialises to a JSON object containing separate entries for +**Split oriented** serializes to a JSON object containing separate entries for values, index and columns. Name is also included for ``Series``: .. ipython:: python @@ -1252,13 +1252,13 @@ values, index and columns. Name is also included for ``Series``: .. note:: Any orient option that encodes to a JSON object will not preserve the ordering of - index and column labels during round-trip serialisation. If you wish to preserve + index and column labels during round-trip serialization. If you wish to preserve label ordering use the `split` option as it uses ordered containers. Date Handling +++++++++++++ -Writing in iso date format +Writing in ISO date format .. ipython:: python @@ -1268,7 +1268,7 @@ Writing in iso date format json = dfd.to_json(date_format='iso') json -Writing in iso date format, with microseconds +Writing in ISO date format, with microseconds .. 
ipython:: python @@ -1297,17 +1297,17 @@ Writing to a file, with a date index and a date column Fallback Behavior +++++++++++++++++ -If the JSON serialiser cannot handle the container contents directly it will fallback in the following manner: +If the JSON serializer cannot handle the container contents directly it will fall back in the following manner: - if a ``toDict`` method is defined by the unrecognised object then that - will be called and its returned ``dict`` will be JSON serialised. + will be called and its returned ``dict`` will be JSON serialized. - if a ``default_handler`` has been passed to ``to_json`` that will be called to convert the object. - otherwise an attempt is made to convert the object to a ``dict`` by parsing its contents. However if the object is complex this will often fail with an ``OverflowError``. -Your best bet when encountering ``OverflowError`` during serialisation +Your best bet when encountering ``OverflowError`` during serialization is to specify a ``default_handler``. For example ``timedelta`` can cause problems: @@ -1346,10 +1346,10 @@ Reading JSON Reading a JSON string to pandas object can take a number of parameters. The parser will try to parse a ``DataFrame`` if ``typ`` is not supplied or -is ``None``. To explicity force ``Series`` parsing, pass ``typ=series`` +is ``None``. To explicitly force ``Series`` parsing, pass ``typ=series`` - ``filepath_or_buffer`` : a **VALID** JSON string or file handle / StringIO. The string could be - a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host + a URL. Valid URL schemes include http, ftp, S3, and file. For file URLs, a host is expected. For instance, a local file could be file ://localhost/path/to/table.json - ``typ`` : type of object to recover (series or frame), default 'frame' @@ -1377,8 +1377,8 @@ is ``None``. 
To explicity force ``Series`` parsing, pass ``typ=series`` - ``dtype`` : if True, infer dtypes, if a dict of column to dtype, then use those, if False, then don't infer dtypes at all, default is True, apply only to the data - ``convert_axes`` : boolean, try to convert the axes to the proper dtypes, default is True -- ``convert_dates`` : a list of columns to parse for dates; If True, then try to parse datelike columns, default is True -- ``keep_default_dates`` : boolean, default True. If parsing dates, then parse the default datelike columns +- ``convert_dates`` : a list of columns to parse for dates; If True, then try to parse date-like columns, default is True +- ``keep_default_dates`` : boolean, default True. If parsing dates, then parse the default date-like columns - ``numpy`` : direct decoding to numpy arrays. default is False; Supports numeric data only, although labels may be non-numeric. Also note that the JSON ordering **MUST** be the same for each term if ``numpy=True`` - ``precise_float`` : boolean, default ``False``. Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (``False``) is to use fast but less precise builtin functionality @@ -1387,7 +1387,7 @@ is ``None``. To explicity force ``Series`` parsing, pass ``typ=series`` then pass one of 's', 'ms', 'us' or 'ns' to force timestamp precision to seconds, milliseconds, microseconds or nanoseconds respectively. -The parser will raise one of ``ValueError/TypeError/AssertionError`` if the JSON is not parsable. +The parser will raise one of ``ValueError/TypeError/AssertionError`` if the JSON is not parseable. 
If a non-default ``orient`` was used when encoding to JSON be sure to pass the same option here so that decoding produces sensible results, see `Orient Options`_ for an @@ -1438,7 +1438,7 @@ Specify dtypes for conversion: pd.read_json('test.json', dtype={'A' : 'float32', 'bools' : 'int8'}).dtypes -Preserve string indicies: +Preserve string indices: .. ipython:: python @@ -1480,7 +1480,7 @@ The Numpy Parameter This supports numeric data only. Index and columns labels may be non-numeric, e.g. strings, dates etc. If ``numpy=True`` is passed to ``read_json`` an attempt will be made to sniff -an appropriate dtype during deserialisation and to subsequently decode directly +an appropriate dtype during deserialization and to subsequently decode directly to numpy arrays, bypassing the need for intermediate Python objects. This can provide speedups if you are deserialising a large amount of numeric @@ -1502,7 +1502,7 @@ data: timeit read_json(jsonfloats, numpy=True) -The speedup is less noticable for smaller datasets: +The speedup is less noticeable for smaller datasets: .. ipython:: python @@ -1586,7 +1586,7 @@ Reading HTML Content .. versionadded:: 0.12.0 The top-level :func:`~pandas.io.html.read_html` function can accept an HTML -string/file/url and will parse HTML tables into list of pandas DataFrames. +string/file/URL and will parse HTML tables into list of pandas DataFrames. Let's look at a few examples. .. note:: @@ -2381,7 +2381,7 @@ hierarchical path-name like format (e.g. ``foo/bar/bah``), which will generate a hierarchy of sub-stores (or ``Groups`` in PyTables parlance). Keys can be specified with out the leading '/' and are ALWAYS absolute (e.g. 'foo' refers to '/foo'). Removal operations can remove -everying in the sub-store and BELOW, so be *careful*. +everything in the sub-store and BELOW, so be *careful*. .. 
ipython:: python @@ -2516,7 +2516,7 @@ The ``indexers`` are on the left-hand side of the sub-expression: - ``columns``, ``major_axis``, ``ts`` -The right-hand side of the sub-expression (after a comparsion operator) can be: +The right-hand side of the sub-expression (after a comparison operator) can be: - functions that will be evaluated, e.g. ``Timestamp('2012-02-01')`` - strings, e.g. ``"bar"`` @@ -2696,7 +2696,7 @@ be data_columns # columns are stored separately as ``PyTables`` columns store.root.df_dc.table -There is some performance degredation by making lots of columns into +There is some performance degradation by making lots of columns into `data columns`, so it is up to the user to designate these. In addition, you cannot change data columns (nor indexables) after the first append/put operation (Of course you can simply read in the data and @@ -2935,7 +2935,7 @@ after the fact. - ``ptrepack --chunkshape=auto --propindexes --complevel=9 --complib=blosc in.h5 out.h5`` Furthermore ``ptrepack in.h5 out.h5`` will *repack* the file to allow -you to reuse previously deleted space. Aalternatively, one can simply +you to reuse previously deleted space. Alternatively, one can simply remove the file and write again, or use the ``copy`` method. .. _io.hdf5-notes: @@ -2996,7 +2996,7 @@ Currently, ``unicode`` and ``datetime`` columns (represented with a dtype of ``object``), **WILL FAIL**. In addition, even though a column may look like a ``datetime64[ns]``, if it contains ``np.nan``, this **WILL FAIL**. You can try to convert datetimelike columns to proper -``datetime64[ns]`` columns, that possibily contain ``NaT`` to represent +``datetime64[ns]`` columns, that possibly contain ``NaT`` to represent invalid values. 
(Some of these issues have been addressed and these conversion may not be necessary in future versions of pandas) @@ -3025,7 +3025,7 @@ may introduce a string for a column **larger** than the column can hold, an Exce could have a silent truncation of these columns, leading to loss of information). In the future we may relax this and allow a user-specified truncation to occur. -Pass ``min_itemsize`` on the first table creation to a-priori specifiy the minimum length of a particular string column. +Pass ``min_itemsize`` on the first table creation to a-priori specify the minimum length of a particular string column. ``min_itemsize`` can be an integer, or a dict mapping a column name to an integer. You can pass ``values`` as a key to allow all *indexables* or *data_columns* to have this min_itemsize. @@ -3070,7 +3070,7 @@ External Compatibility ~~~~~~~~~~~~~~~~~~~~~~ ``HDFStore`` write ``table`` format objects in specific formats suitable for -producing loss-less roundtrips to pandas objects. For external +producing loss-less round trips to pandas objects. For external compatibility, ``HDFStore`` can read native ``PyTables`` format tables. It is possible to write an ``HDFStore`` object that can easily be imported into ``R`` using the ``rhdf5`` library. Create a table @@ -3136,7 +3136,7 @@ Performance generally longer as compared with regular stores. Query times can be quite fast, especially on an indexed axis. - You can pass ``chunksize=<int>`` to ``append``, specifying the - write chunksize (default is 50000). This will signficantly lower + write chunksize (default is 50000). This will significantly lower your memory usage on writing. 
- You can pass ``expectedrows=<int>`` to the first ``append``, to set the TOTAL number of expected rows that ``PyTables`` will @@ -3304,7 +3304,7 @@ And you can explicitly force columns to be parsed as dates: pd.read_sql_table('data', engine, parse_dates=['Date']) -If needed you can explicitly specifiy a format string, or a dict of arguments +If needed you can explicitly specify a format string, or a dict of arguments to pass to :func:`pandas.to_datetime`: .. code-block:: python @@ -3456,7 +3456,7 @@ response code of Google BigQuery can be successful (200) even if the append failed. For this reason, if there is a failure to append to the table, the complete error response from BigQuery is returned which can be quite long given it provides a status for each row. You may want -to start with smaller chuncks to test that the size and types of your +to start with smaller chunks to test that the size and types of your dataframe match your destination table to make debugging simpler. .. code-block:: python @@ -3470,7 +3470,7 @@ The BigQuery SQL query language has some oddities, see `here <https://developers While BigQuery uses SQL-like syntax, it has some important differences from traditional databases both in functionality, API limitations (size and -qunatity of queries or uploads), and how Google charges for use of the service. +quantity of queries or uploads), and how Google charges for use of the service. You should refer to Google documentation often as the service seems to be changing and evolving. BiqQuery is best for analyzing large sets of data quickly, but it is not a direct replacement for a transactional database. @@ -3522,7 +3522,7 @@ converting them to a DataFrame which is returned: Currently the ``index`` is retrieved as a column on read back. 
-The parameter ``convert_categoricals`` indicates wheter value labels should be +The parameter ``convert_categoricals`` indicates whether value labels should be read and used to create a ``Categorical`` variable from them. Value labels can also be retrieved by the function ``variable_labels``, which requires data to be called before (see ``pandas.io.stata.StataReader``). diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index 9263eb2cedf9b..b0319c01b2737 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -548,7 +548,7 @@ will be replaced with a scalar (list of regex -> regex) All of the regular expression examples can also be passed with the ``to_replace`` argument as the ``regex`` argument. In this case the ``value`` -argument must be passed explicity by name or ``regex`` must be a nested +argument must be passed explicitly by name or ``regex`` must be a nested dictionary. The previous example, in this case, would then be .. ipython:: python @@ -566,7 +566,7 @@ want to use a regular expression. Numeric Replacement ~~~~~~~~~~~~~~~~~~~ -Similiar to ``DataFrame.fillna`` +Similar to ``DataFrame.fillna`` .. ipython:: python :suppress: diff --git a/doc/source/options.rst b/doc/source/options.rst index 961797acb00aa..1e8517014bfc5 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -166,7 +166,7 @@ dataframes to stretch across pages, wrapped over the full column vs row-wise. pd.reset_option('max_rows') ``display.max_columnwidth`` sets the maximum width of columns. Cells -of this length or longer will be truncated with an elipsis. +of this length or longer will be truncated with an ellipsis. .. 
ipython:: python diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 8e47466385e77..49a788def2854 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -18,7 +18,7 @@ Package overview * Input/Output tools: loading tabular data from flat files (CSV, delimited, Excel 2003), and saving and loading pandas objects from the fast and efficient PyTables/HDF5 format. - * Memory-efficent "sparse" versions of the standard data structures for storing + * Memory-efficient "sparse" versions of the standard data structures for storing data that is mostly missing or mostly constant (some fixed value) * Moving window statistics (rolling mean, rolling standard deviation, etc.) * Static and moving window linear and `panel regression diff --git a/doc/source/release.rst b/doc/source/release.rst index e490cb330a497..9dc96219f42d9 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -301,8 +301,8 @@ Improvements to existing features limit precision based on the values in the array (:issue:`3401`) - ``pd.show_versions()`` is now available for convenience when reporting issues. 
- perf improvements to Series.str.extract (:issue:`5944`) -- perf improvments in ``dtypes/ftypes`` methods (:issue:`5968`) -- perf improvments in indexing with object dtypes (:issue:`5968`) +- perf improvements in ``dtypes/ftypes`` methods (:issue:`5968`) +- perf improvements in indexing with object dtypes (:issue:`5968`) - improved dtype inference for ``timedelta`` like passed to constructors (:issue:`5458`, :issue:`5689`) - escape special characters when writing to latex (:issue: `5374`) - perf improvements in ``DataFrame.apply`` (:issue:`6013`) @@ -329,7 +329,7 @@ Bug Fixes - Bug in groupby dtype conversion with datetimelike (:issue:`5869`) - Regression in handling of empty Series as indexers to Series (:issue:`5877`) - Bug in internal caching, related to (:issue:`5727`) -- Testing bug in reading json/msgpack from a non-filepath on windows under py3 (:issue:`5874`) +- Testing bug in reading JSON/msgpack from a non-filepath on windows under py3 (:issue:`5874`) - Bug when assigning to .ix[tuple(...)] (:issue:`5896`) - Bug in fully reindexing a Panel (:issue:`5905`) - Bug in idxmin/max with object dtypes (:issue:`5914`) @@ -337,7 +337,7 @@ Bug Fixes - Bug in assigning to chained series with a series via ix (:issue:`5928`) - Bug in creating an empty DataFrame, copying, then assigning (:issue:`5932`) - Bug in DataFrame.tail with empty frame (:issue:`5846`) -- Bug in propogating metadata on ``resample`` (:issue:`5862`) +- Bug in propagating metadata on ``resample`` (:issue:`5862`) - Fixed string-representation of ``NaT`` to be "NaT" (:issue:`5708`) - Fixed string-representation for Timestamp to show nanoseconds if present (:issue:`5912`) - ``pd.match`` not returning passed sentinel @@ -638,7 +638,7 @@ API Changes - support ``timedelta64[ns]`` as a serialization type (:issue:`3577`) - store `datetime.date` objects as ordinals rather then timetuples to avoid timezone issues (:issue:`2852`), thanks @tavistmorph and @numpand - - ``numexpr`` 2.2.2 fixes incompatiblity in 
PyTables 2.4 (:issue:`4908`) + - ``numexpr`` 2.2.2 fixes incompatibility in PyTables 2.4 (:issue:`4908`) - ``flush`` now accepts an ``fsync`` parameter, which defaults to ``False`` (:issue:`5364`) - ``unicode`` indices not supported on ``table`` formats (:issue:`5386`) @@ -649,7 +649,7 @@ API Changes Options are seconds, milliseconds, microseconds and nanoseconds. (:issue:`4362`, :issue:`4498`). - added ``default_handler`` parameter to allow a callable to be passed - which will be responsible for handling otherwise unserialisable objects. + which will be responsible for handling otherwise unserializable objects. (:issue:`5138`) - ``Index`` and ``MultiIndex`` changes (:issue:`4039`): @@ -723,7 +723,7 @@ API Changes ``SparsePanel``, etc.), now support the entire set of arithmetic operators and arithmetic flex methods (add, sub, mul, etc.). ``SparsePanel`` does not support ``pow`` or ``mod`` with non-scalars. (:issue:`3765`) -- Arithemtic func factories are now passed real names (suitable for using +- Arithmetic func factories are now passed real names (suitable for using with super) (:issue:`5240`) - Provide numpy compatibility with 1.7 for a calling convention like ``np.prod(pandas_object)`` as numpy call with additional keyword args @@ -802,7 +802,7 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` - ``swapaxes`` on a ``Panel`` with the same axes specified now return a copy - support attribute access for setting - - ``filter`` supports same api as original ``DataFrame`` filter + - ``filter`` supports same API as original ``DataFrame`` filter - ``fillna`` refactored to ``core/generic.py``, while > 3ndim is ``NotImplemented`` @@ -836,7 +836,7 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` - added ``ftypes`` method to Series/DataFame, similar to ``dtypes``, but indicates if the underlying is sparse/dense (as well as the dtype) - All ``NDFrame`` objects now have a ``_prop_attributes``, which can be used - to indcated various values to
propogate to a new object from an existing + to indicate various values to propagate to a new object from an existing (e.g. name in ``Series`` will follow more automatically now) - Internal type checking is now done via a suite of generated classes, allowing ``isinstance(value, klass)`` without having to directly import the @@ -855,7 +855,7 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` elements (:issue:`1903`) - Refactor ``clip`` methods to core/generic.py (:issue:`4798`) - Refactor of ``_get_numeric_data/_get_bool_data`` to core/generic.py, - allowing Series/Panel functionaility + allowing Series/Panel functionality - Refactor of Series arithmetic with time-like objects (datetime/timedelta/time etc.) into a separate, cleaned up wrapper class. (:issue:`4613`) @@ -927,7 +927,7 @@ Bug Fixes as the docstring says (:issue:`4362`). - ``as_index`` is no longer ignored when doing groupby apply (:issue:`4648`, :issue:`3417`) -- JSON NaT handling fixed, NaTs are now serialised to `null` (:issue:`4498`) +- JSON NaT handling fixed, NaTs are now serialized to `null` (:issue:`4498`) - Fixed JSON handling of escapable characters in JSON object keys (:issue:`4593`) - Fixed passing ``keep_default_na=False`` when ``na_values=None`` @@ -1086,7 +1086,7 @@ Bug Fixes - Fix a bug where reshaping a ``Series`` to its own shape raised ``TypeError`` (:issue:`4554`) and other reshaping issues. - Bug in setting with ``ix/loc`` and a mixed int/string index (:issue:`4544`) -- Make sure series-series boolean comparions are label based (:issue:`4947`) +- Make sure series-series boolean comparisons are label based (:issue:`4947`) - Bug in multi-level indexing with a Timestamp partial indexer (:issue:`4294`) - Tests/fix for multi-index construction of an all-nan frame (:issue:`4078`) @@ -1096,7 +1096,7 @@ Bug Fixes ordering of returned tables (:issue:`4770`, :issue:`5029`). 
- Fixed a bug where :func:`~pandas.read_html` was incorrectly parsing when passed ``index_col=0`` (:issue:`5066`). -- Fixed a bug where :func:`~pandas.read_html` was incorrectly infering the +- Fixed a bug where :func:`~pandas.read_html` was incorrectly inferring the type of headers (:issue:`5048`). - Fixed a bug where ``DatetimeIndex`` joins with ``PeriodIndex`` caused a stack overflow (:issue:`3899`). @@ -1203,7 +1203,7 @@ New Features - Added support for writing in ``to_csv`` and reading in ``read_csv``, multi-index columns. The ``header`` option in ``read_csv`` now accepts a list of the rows from which to read the index. Added the option, - ``tupleize_cols`` to provide compatiblity for the pre 0.12 behavior of + ``tupleize_cols`` to provide compatibility for the pre 0.12 behavior of writing and reading multi-index columns via a list of tuples. The default in 0.12 is to write lists of tuples and *not* interpret list of tuples as a multi-index column. @@ -1250,7 +1250,7 @@ Improvements to existing features :issue:`3572`, :issue:`3911`, :issue:`3912`), but they will try to convert object arrays to numeric arrays if possible so that you can still plot, for example, an object array with floats. This happens before any drawing takes place which - elimnates any spurious plots from showing up. + eliminates any spurious plots from showing up. - Added Faq section on repr display options, to help users customize their setup. - ``where`` operations that result in block splitting are much faster (:issue:`3733`) - Series and DataFrame hist methods now take a ``figsize`` argument (:issue:`3834`) @@ -1258,7 +1258,7 @@ Improvements to existing features operations (:issue:`3877`) - Add ``unit`` keyword to ``Timestamp`` and ``to_datetime`` to enable passing of integers or floats that are in an epoch unit of ``D, s, ms, us, ns``, thanks @mtkini (:issue:`3969`) - (e.g. unix timestamps or epoch ``s``, with fracional seconds allowed) (:issue:`3540`) + (e.g. 
unix timestamps or epoch ``s``, with fractional seconds allowed) (:issue:`3540`) - DataFrame corr method (spearman) is now cythonized. - Improved ``network`` test decorator to catch ``IOError`` (and therefore ``URLError`` as well). Added ``with_connectivity_check`` decorator to allow @@ -1296,7 +1296,7 @@ API Changes ``timedelta64[ns]`` to ``object/int`` (:issue:`3425`) - The behavior of ``datetime64`` dtypes has changed with respect to certain so-called reduction operations (:issue:`3726`). The following operations now - raise a ``TypeError`` when perfomed on a ``Series`` and return an *empty* + raise a ``TypeError`` when performed on a ``Series`` and return an *empty* ``Series`` when performed on a ``DataFrame`` similar to performing these operations on, for example, a ``DataFrame`` of ``slice`` objects: - sum, prod, mean, std, var, skew, kurt, corr, and cov @@ -1335,7 +1335,7 @@ API Changes deprecated - set FutureWarning to require data_source, and to replace year/month with expiry date in pandas.io options. This is in preparation to add options - data from google (:issue:`3822`) + data from Google (:issue:`3822`) - the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are deprecated - Implement ``__nonzero__`` for ``NDFrame`` objects (:issue:`3691`, :issue:`3696`) @@ -1452,13 +1452,13 @@ Bug Fixes their first argument (:issue:`3702`) - Fix file tokenization error with \r delimiter and quoted fields (:issue:`3453`) - Groupby transform with item-by-item not upcasting correctly (:issue:`3740`) -- Incorrectly read a HDFStore multi-index Frame witha column specification (:issue:`3748`) +- Incorrectly read a HDFStore multi-index Frame with a column specification (:issue:`3748`) - ``read_html`` now correctly skips tests (:issue:`3741`) - PandasObjects raise TypeError when trying to hash (:issue:`3882`) - Fix incorrect arguments passed to concat that are not list-like (e.g. 
concat(df1,df2)) (:issue:`3481`) - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (:issue:`3795`) -- Fix index name not propogating when using ``loc/ix`` (:issue:`3880`) +- Fix index name not propagating when using ``loc/ix`` (:issue:`3880`) - Fix groupby when applying a custom function resulting in a returned DataFrame was not converting dtypes (:issue:`3911`) - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression @@ -1468,7 +1468,7 @@ Bug Fixes - Indexing with a string with seconds resolution not selecting from a time index (:issue:`3925`) - csv parsers would loop infinitely if ``iterator=True`` but no ``chunksize`` was specified (:issue:`3967`), python parser failing with ``chunksize=1`` -- Fix index name not propogating when using ``shift`` +- Fix index name not propagating when using ``shift`` - Fixed dropna=False being ignored with multi-index stack (:issue:`3997`) - Fixed flattening of columns when renaming MultiIndex columns DataFrame (:issue:`4004`) - Fix ``Series.clip`` for datetime series. NA/NaN threshold values will now throw ValueError (:issue:`3996`) @@ -1523,17 +1523,17 @@ New Features - New documentation section, ``10 Minutes to Pandas`` - New documentation section, ``Cookbook`` -- Allow mixed dtypes (e.g ``float32/float64/int32/int16/int8``) to coexist in DataFrames and propogate in operations +- Allow mixed dtypes (e.g ``float32/float64/int32/int16/int8``) to coexist in DataFrames and propagate in operations - Add function to pandas.io.data for retrieving stock index components from Yahoo! 
finance (:issue:`2795`) - Support slicing with time objects (:issue:`2681`) - Added ``.iloc`` attribute, to support strict integer based indexing, analogous to ``.ix`` (:issue:`2922`) -- Added ``.loc`` attribute, to support strict label based indexing, analagous to ``.ix`` (:issue:`3053`) +- Added ``.loc`` attribute, to support strict label based indexing, analogous to ``.ix`` (:issue:`3053`) - Added ``.iat`` attribute, to support fast scalar access via integers (replaces ``iget_value/iset_value``) - Added ``.at`` attribute, to support fast scalar access via labels (replaces ``get_value/set_value``) -- Moved functionaility from ``irow,icol,iget_value/iset_value`` to ``.iloc`` indexer (via ``_ixs`` methods in each object) +- Moved functionality from ``irow,icol,iget_value/iset_value`` to ``.iloc`` indexer (via ``_ixs`` methods in each object) - Added support for expression evaluation using the ``numexpr`` library - Added ``convert=boolean`` to ``take`` routines to translate negative indices to positive, defaults to True -- Added to_series() method to indices, to facilitate the creation of indexeres (:issue:`3275`) +- Added to_series() method to indices, to facilitate the creation of indexers (:issue:`3275`) Improvements to existing features ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1760,7 +1760,7 @@ Bug Fixes - Fixed a bug in the legend of plotting.andrews_curves() (:issue:`3278`) - Produce a series on apply if we only generate a singular series and have a simple index (:issue:`2893`) -- Fix Python ascii file parsing when integer falls outside of floating point +- Fix Python ASCII file parsing when integer falls outside of floating point spacing (:issue:`3258`) - fixed pretty priniting of sets (:issue:`3294`) - Panel() and Panel.from_dict() now respects ordering when give OrderedDict (:issue:`3303`) @@ -1783,7 +1783,7 @@ pandas 0.10.1 New Features ~~~~~~~~~~~~ -- Add data inferface to World Bank WDI pandas.io.wb (:issue:`2592`) +- Add data interface to World Bank WDI 
pandas.io.wb (:issue:`2592`) API Changes ~~~~~~~~~~~ @@ -1822,7 +1822,7 @@ Improvements to existing features - added method ``copy`` to copy an existing store (and possibly upgrade) - show the shape of the data on disk for non-table stores when printing the store - - added ability to read PyTables flavor tables (allows compatiblity to + - added ability to read PyTables flavor tables (allows compatibility to other HDF5 systems) - Add ``logx`` option to DataFrame/Series.plot (:issue:`2327`, :issue:`2565`) @@ -1837,7 +1837,7 @@ Improvements to existing features - Add methods ``neg`` and ``inv`` to Series - Implement ``kind`` option in ``ExcelFile`` to indicate whether it's an XLS or XLSX file (:issue:`2613`) -- Documented a fast-path in pd.read_Csv when parsing iso8601 datetime strings +- Documented a fast-path in pd.read_csv when parsing iso8601 datetime strings yielding as much as a 20x speedup. (:issue:`5993`) @@ -1955,7 +1955,7 @@ New Features Experimental Features ~~~~~~~~~~~~~~~~~~~~~ -- Add support for Panel4D, a named 4 Dimensional stucture +- Add support for Panel4D, a named 4 Dimensional structure - Add support for ndpanel factory functions, to create custom, domain-specific N-Dimensional containers @@ -2008,7 +2008,7 @@ Improvements to existing features - Add ``normalize`` option to Series/DataFrame.asfreq (:issue:`2137`) - SparseSeries and SparseDataFrame construction from empty and scalar values now no longer create dense ndarrays unnecessarily (:issue:`2322`) -- ``HDFStore`` now supports hierarchial keys (:issue:`2397`) +- ``HDFStore`` now supports hierarchical keys (:issue:`2397`) - Support multiple query selection formats for ``HDFStore tables`` (:issue:`1996`) - Support ``del store['df']`` syntax to delete HDFStores - Add multi-dtype support for ``HDFStore tables`` @@ -2077,7 +2077,7 @@ Bug Fixes - Fix DataFrame row indexing case with MultiIndex (:issue:`2314`) - Fix to_excel exporting issues with Timestamp objects in index (:issue:`2294`) - Fixes 
assigning scalars and array to hierarchical column chunk (:issue:`1803`) -- Fixed a UnicdeDecodeError with series tidy_repr (:issue:`2225`) +- Fixed a UnicodeDecodeError with series tidy_repr (:issue:`2225`) - Fixed issued with duplicate keys in an index (:issue:`2347`, :issue:`2380`) - Fixed issues re: Hash randomization, default on starting w/ py3.3 (:issue:`2331`) - Fixed issue with missing attributes after loading a pickled dataframe (:issue:`2431`) @@ -2783,7 +2783,7 @@ Bug Fixes (:issue:`1013`) - DataFrame.plot(logy=True) has no effect (:issue:`1011`). - Broken arithmetic operations between SparsePanel-Panel (:issue:`1015`) -- Unicode repr issues in MultiIndex with non-ascii characters (:issue:`1010`) +- Unicode repr issues in MultiIndex with non-ASCII characters (:issue:`1010`) - DataFrame.lookup() returns inconsistent results if exact match not present (:issue:`1001`) - DataFrame arithmetic operations not treating None as NA (:issue:`992`) @@ -2794,7 +2794,7 @@ Bug Fixes - DataFrame.plot(kind='bar') ignores color argument (:issue:`958`) - Inconsistent Index comparison results (:issue:`948`) - Improper int dtype DataFrame construction from data with NaN (:issue:`846`) -- Removes default 'result' name in grouby results (:issue:`995`) +- Removes default 'result' name in groupby results (:issue:`995`) - DataFrame.from_records no longer mutate input columns (:issue:`975`) - Use Index name when grouping by it (:issue:`1313`) @@ -3866,7 +3866,7 @@ pandas 0.4.1 **Release date:** 9/25/2011 is is primarily a bug fix release but includes some new features and -provements +improvements New Features ~~~~~~~~~~~~ diff --git a/doc/source/rplot.rst b/doc/source/rplot.rst index cdecee39d8d1e..46b57cea2d9ed 100644 --- a/doc/source/rplot.rst +++ b/doc/source/rplot.rst @@ -99,7 +99,7 @@ The plot above shows that it is possible to have two or more plots for the same @savefig rplot4_tips.png plot.render(plt.gcf()) -Above is a similar plot but with 2D kernel desnity estimation 
plot superimposed. +Above is a similar plot but with 2D kernel density estimation plot superimposed. .. ipython:: python diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 76bc796beced8..cbfb20c6f9d7d 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -379,9 +379,9 @@ We are stopping on the included end-point as its part of the index Datetime Indexing ~~~~~~~~~~~~~~~~~ -Indexing a ``DateTimeIndex`` with a partial string depends on the "accuracy" of the period, in other words how specific the interval is in relation to the frequency of the index. In contrast, indexing with datetime objects is exact, because the objects have exact meaning. These also follow the sematics of *including both endpoints*. +Indexing a ``DateTimeIndex`` with a partial string depends on the "accuracy" of the period, in other words how specific the interval is in relation to the frequency of the index. In contrast, indexing with datetime objects is exact, because the objects have exact meaning. These also follow the semantics of *including both endpoints*. -These ``datetime`` objects are specific ``hours, minutes,`` and ``seconds`` even though they were not explicity specified (they are ``0``). +These ``datetime`` objects are specific ``hours, minutes,`` and ``seconds`` even though they were not explicitly specified (they are ``0``). .. ipython:: python @@ -1460,7 +1460,7 @@ Series of timedeltas with ``NaT`` values are supported y = s - s.shift() y -Elements can be set to ``NaT`` using ``np.nan`` analagously to datetimes +Elements can be set to ``NaT`` using ``np.nan`` analogously to datetimes .. 
ipython:: python diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 630e40c4ebfa2..69e04483cb47d 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -317,7 +317,7 @@ The return type of ``boxplot`` depends on two keyword arguments: ``by`` and ``re When ``by`` is ``None``: * if ``return_type`` is ``'dict'``, a dictionary containing the :class:`matplotlib Lines <matplotlib.lines.Line2D>` is returned. The keys are "boxes", "caps", "fliers", "medians", and "whiskers". - This is the deafult. + This is the default. * if ``return_type`` is ``'axes'``, a :class:`matplotlib Axes <matplotlib.axes.Axes>` containing the boxplot is returned. * if ``return_type`` is ``'both'`` a namedtuple containging the :class:`matplotlib Axes <matplotlib.axes.Axes>` and :class:`matplotlib Lines <matplotlib.lines.Line2D>` is returned @@ -763,7 +763,7 @@ layout and formatting of the returned plot: plt.figure(); ts.plot(style='k--', label='Series'); For each kind of plot (e.g. `line`, `bar`, `scatter`) any additional arguments -keywords are passed alogn to the corresponding matplotlib function +keywords are passed along to the corresponding matplotlib function (:meth:`ax.plot() <matplotlib.axes.Axes.plot>`, :meth:`ax.bar() <matplotlib.axes.Axes.bar>`, :meth:`ax.scatter() <matplotlib.axes.Axes.scatter>`). These can be used
https://api.github.com/repos/pandas-dev/pandas/pulls/7745
2014-07-13T11:23:38Z
2014-07-13T12:37:47Z
2014-07-13T12:37:47Z
2014-07-13T12:37:47Z
spell fix: seperated -> separated
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 4d67616c5cd60..14942c6f0f194 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1253,11 +1253,11 @@ Methods like ``match``, ``contains``, ``startswith``, and ``endswith`` take ``upper``,Equivalent to ``str.upper`` -Getting indicator variables from seperated strings +Getting indicator variables from separated strings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can extract dummy variables from string columns. -For example if they are seperated by a ``'|'``: +For example if they are separated by a ``'|'``: .. ipython:: python
https://api.github.com/repos/pandas-dev/pandas/pulls/7742
2014-07-13T08:51:19Z
2014-07-13T11:47:37Z
2014-07-13T11:47:37Z
2014-07-13T12:13:22Z
ENH/BUG: DatetimeIndex and PeriodIndex in-place ops behaves incorrectly
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 06c93541a7783..086c24246918d 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -198,6 +198,10 @@ Bug Fixes - Bug in ``DataFrame.as_matrix()`` with mixed ``datetime64[ns]`` and ``timedelta64[ns]`` dtypes (:issue:`7778`) - Bug in ``HDFStore.select_column()`` not preserving UTC timezone info when selecting a DatetimeIndex (:issue:`7777`) +- Bug in ``DatetimeIndex`` and ``PeriodIndex`` in-place addition and subtraction cause different result from normal one (:issue:`6527`) +- Bug in adding and subtracting ``PeriodIndex`` with ``PeriodIndex`` raise ``TypeError`` (:issue:`7741`) +- Bug in ``combine_first`` with ``PeriodIndex`` data raises ``TypeError`` (:issue:`3367`) + - Bug in pickles contains ``DateOffset`` may raise ``AttributeError`` when ``normalize`` attribute is reffered internally (:issue:`7748`) diff --git a/pandas/core/base.py b/pandas/core/base.py index 4035627b98458..243e34e35784a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1,6 +1,8 @@ """ Base and utility classes for pandas objects. 
""" +import datetime + from pandas import compat import numpy as np from pandas.core import common as com @@ -511,4 +513,34 @@ def resolution(self): from pandas.tseries.frequencies import get_reso_string return get_reso_string(self._resolution) + def __add__(self, other): + from pandas.core.index import Index + from pandas.tseries.offsets import DateOffset + if isinstance(other, Index): + return self.union(other) + elif isinstance(other, (DateOffset, datetime.timedelta, np.timedelta64)): + return self._add_delta(other) + elif com.is_integer(other): + return self.shift(other) + else: # pragma: no cover + return NotImplemented + + def __sub__(self, other): + from pandas.core.index import Index + from pandas.tseries.offsets import DateOffset + if isinstance(other, Index): + return self.diff(other) + elif isinstance(other, (DateOffset, datetime.timedelta, np.timedelta64)): + return self._add_delta(-other) + elif com.is_integer(other): + return self.shift(-other) + else: # pragma: no cover + return NotImplemented + + __iadd__ = __add__ + __isub__ = __sub__ + + def _add_delta(self, other): + return NotImplemented + diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 761d79a288df3..1b7db1451f6cf 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -481,6 +481,8 @@ def test_factorize(self): class TestDatetimeIndexOps(Ops): _allowed = '_allow_datetime_index_ops' + tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', + 'dateutil/Asia/Singapore', 'dateutil/US/Pacific'] def setUp(self): super(TestDatetimeIndexOps, self).setUp() @@ -545,7 +547,7 @@ def test_asobject_tolist(self): self.assertEqual(idx.tolist(), expected_list) def test_minmax(self): - for tz in [None, 'Asia/Tokyo', 'US/Eastern']: + for tz in self.tz: # monotonic idx1 = pd.DatetimeIndex([pd.NaT, '2011-01-01', '2011-01-02', '2011-01-03'], tz=tz) @@ -613,6 +615,100 @@ def test_resolution(self): idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz) 
self.assertEqual(idx.resolution, expected) + def test_add_iadd(self): + for tz in self.tz: + # union + rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) + expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz) + + rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) + expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz) + + rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other3 = pd.DatetimeIndex([], tz=tz) + expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + + for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), + (rng3, other3, expected3)]: + result_add = rng + other + result_union = rng.union(other) + + tm.assert_index_equal(result_add, expected) + tm.assert_index_equal(result_union, expected) + rng += other + tm.assert_index_equal(rng, expected) + + # offset + if _np_version_under1p7: + offsets = [pd.offsets.Hour(2), timedelta(hours=2)] + else: + offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h')] + + for delta in offsets: + rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) + result = rng + delta + expected = pd.date_range('2000-01-01 02:00', '2000-02-01 02:00', tz=tz) + tm.assert_index_equal(result, expected) + rng += delta + tm.assert_index_equal(rng, expected) + + # int + rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) + result = rng + 1 + expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz) + tm.assert_index_equal(result, expected) + rng += 1 + tm.assert_index_equal(rng, expected) + + def test_sub_isub(self): + for tz in self.tz: + # diff + rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) + expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + + rng2 = 
pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) + expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz) + + rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + other3 = pd.DatetimeIndex([], tz=tz) + expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) + + for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), + (rng3, other3, expected3)]: + result_add = rng - other + result_union = rng.diff(other) + + tm.assert_index_equal(result_add, expected) + tm.assert_index_equal(result_union, expected) + rng -= other + tm.assert_index_equal(rng, expected) + + # offset + if _np_version_under1p7: + offsets = [pd.offsets.Hour(2), timedelta(hours=2)] + else: + offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h')] + + for delta in offsets: + rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) + result = rng - delta + expected = pd.date_range('1999-12-31 22:00', '2000-01-31 22:00', tz=tz) + tm.assert_index_equal(result, expected) + rng -= delta + tm.assert_index_equal(rng, expected) + + # int + rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) + result = rng - 1 + expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz) + tm.assert_index_equal(result, expected) + rng -= 1 + tm.assert_index_equal(rng, expected) + class TestPeriodIndexOps(Ops): _allowed = '_allow_period_index_ops' @@ -745,6 +841,133 @@ def test_resolution(self): idx = pd.period_range(start='2013-04-01', periods=30, freq=freq) self.assertEqual(idx.resolution, expected) + def test_add_iadd(self): + # union + rng1 = pd.period_range('1/1/2000', freq='D', periods=5) + other1 = pd.period_range('1/6/2000', freq='D', periods=5) + expected1 = pd.period_range('1/1/2000', freq='D', periods=10) + + rng2 = pd.period_range('1/1/2000', freq='D', periods=5) + other2 = pd.period_range('1/4/2000', freq='D', periods=5) + expected2 = 
pd.period_range('1/1/2000', freq='D', periods=8) + + rng3 = pd.period_range('1/1/2000', freq='D', periods=5) + other3 = pd.PeriodIndex([], freq='D') + expected3 = pd.period_range('1/1/2000', freq='D', periods=5) + + rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5) + other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5) + expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00', + '2000-01-01 11:00', '2000-01-01 12:00', + '2000-01-01 13:00', '2000-01-02 09:00', + '2000-01-02 10:00', '2000-01-02 11:00', + '2000-01-02 12:00', '2000-01-02 13:00'], + freq='H') + + rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03', + '2000-01-01 09:05'], freq='T') + other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05' + '2000-01-01 09:08'], freq='T') + expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03', + '2000-01-01 09:05', '2000-01-01 09:08'], + freq='T') + + rng6 = pd.period_range('2000-01-01', freq='M', periods=7) + other6 = pd.period_range('2000-04-01', freq='M', periods=7) + expected6 = pd.period_range('2000-01-01', freq='M', periods=10) + + rng7 = pd.period_range('2003-01-01', freq='A', periods=5) + other7 = pd.period_range('1998-01-01', freq='A', periods=8) + expected7 = pd.period_range('1998-01-01', freq='A', periods=10) + + for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), + (rng3, other3, expected3), (rng4, other4, expected4), + (rng5, other5, expected5), (rng6, other6, expected6), + (rng7, other7, expected7)]: + + result_add = rng + other + result_union = rng.union(other) + + tm.assert_index_equal(result_add, expected) + tm.assert_index_equal(result_union, expected) + # GH 6527 + rng += other + tm.assert_index_equal(rng, expected) + + # offset + for delta in [pd.offsets.Hour(2), timedelta(hours=2)]: + rng = pd.period_range('2000-01-01', '2000-02-01') + with tm.assertRaisesRegexp(TypeError, 'unsupported operand type\(s\)'): + result = rng + delta + with 
tm.assertRaisesRegexp(TypeError, 'unsupported operand type\(s\)'): + rng += delta + + # int + rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10) + result = rng + 1 + expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10) + tm.assert_index_equal(result, expected) + rng += 1 + tm.assert_index_equal(rng, expected) + + def test_sub_isub(self): + # diff + rng1 = pd.period_range('1/1/2000', freq='D', periods=5) + other1 = pd.period_range('1/6/2000', freq='D', periods=5) + expected1 = pd.period_range('1/1/2000', freq='D', periods=5) + + rng2 = pd.period_range('1/1/2000', freq='D', periods=5) + other2 = pd.period_range('1/4/2000', freq='D', periods=5) + expected2 = pd.period_range('1/1/2000', freq='D', periods=3) + + rng3 = pd.period_range('1/1/2000', freq='D', periods=5) + other3 = pd.PeriodIndex([], freq='D') + expected3 = pd.period_range('1/1/2000', freq='D', periods=5) + + rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5) + other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5) + expected4 = rng4 + + rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03', + '2000-01-01 09:05'], freq='T') + other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'], freq='T') + expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T') + + rng6 = pd.period_range('2000-01-01', freq='M', periods=7) + other6 = pd.period_range('2000-04-01', freq='M', periods=7) + expected6 = pd.period_range('2000-01-01', freq='M', periods=3) + + rng7 = pd.period_range('2003-01-01', freq='A', periods=5) + other7 = pd.period_range('1998-01-01', freq='A', periods=8) + expected7 = pd.period_range('2006-01-01', freq='A', periods=2) + + for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), + (rng3, other3, expected3), (rng4, other4, expected4), + (rng5, other5, expected5), (rng6, other6, expected6), + (rng7, other7, expected7),]: + result_add = rng - other + result_union = rng.diff(other) + + 
tm.assert_index_equal(result_add, expected) + tm.assert_index_equal(result_union, expected) + rng -= other + tm.assert_index_equal(rng, expected) + + # offset + for delta in [pd.offsets.Hour(2), timedelta(hours=2)]: + with tm.assertRaisesRegexp(TypeError, 'unsupported operand type\(s\)'): + result = rng + delta + with tm.assertRaisesRegexp(TypeError, 'unsupported operand type\(s\)'): + rng += delta + + # int + rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10) + result = rng - 1 + expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10) + tm.assert_index_equal(result, expected) + rng -= 1 + tm.assert_index_equal(rng, expected) + if __name__ == '__main__': import nose diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 9423037844e74..2a3c53135a644 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -595,30 +595,6 @@ def __setstate__(self, state): else: # pragma: no cover np.ndarray.__setstate__(self, state) - def __add__(self, other): - if isinstance(other, Index): - return self.union(other) - elif isinstance(other, (DateOffset, timedelta)): - return self._add_delta(other) - elif isinstance(other, np.timedelta64): - return self._add_delta(other) - elif com.is_integer(other): - return self.shift(other) - else: # pragma: no cover - raise TypeError(other) - - def __sub__(self, other): - if isinstance(other, Index): - return self.diff(other) - elif isinstance(other, (DateOffset, timedelta)): - return self._add_delta(-other) - elif isinstance(other, np.timedelta64): - return self._add_delta(-other) - elif com.is_integer(other): - return self.shift(-other) - else: # pragma: no cover - raise TypeError(other) - def _add_delta(self, delta): if isinstance(delta, (Tick, timedelta)): inc = offsets._delta_to_nanoseconds(delta) diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 8c4bb2f5adc5e..887bf806dd4e4 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -872,19 +872,6 @@ def 
shift(self, n): values[mask] = tslib.iNaT return PeriodIndex(data=values, name=self.name, freq=self.freq) - def __add__(self, other): - try: - return self.shift(other) - except TypeError: - # self.values + other raises TypeError for invalid input - return NotImplemented - - def __sub__(self, other): - try: - return self.shift(-other) - except TypeError: - return NotImplemented - @property def inferred_type(self): # b/c data is represented as ints make sure we can't have ambiguous diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 53375b4d07796..f5f66a49c29d4 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -2450,6 +2450,20 @@ def test_recreate_from_data(self): idx = PeriodIndex(org.values, freq=o) self.assertTrue(idx.equals(org)) + def test_combine_first(self): + # GH 3367 + didx = pd.DatetimeIndex(start='1950-01-31', end='1950-07-31', freq='M') + pidx = pd.PeriodIndex(start=pd.Period('1950-1'), end=pd.Period('1950-7'), freq='M') + # check to be consistent with DatetimeIndex + for idx in [didx, pidx]: + a = pd.Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx) + b = pd.Series([9, 9, 9, 9, 9, 9, 9], index=idx) + + result = a.combine_first(b) + expected = pd.Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64) + tm.assert_series_equal(result, expected) + + def _permute(obj): return obj.take(np.random.permutation(len(obj))) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index b6761426edc5d..f2bc66f156c75 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -1235,13 +1235,6 @@ def test_last_subset(self): result = ts[:0].last('3M') assert_series_equal(result, ts[:0]) - def test_add_offset(self): - rng = date_range('1/1/2000', '2/1/2000') - - result = rng + offsets.Hour(2) - expected = date_range('1/1/2000 02:00', '2/1/2000 02:00') - self.assertTrue(result.equals(expected)) - def 
test_format_pre_1900_dates(self): rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC') rng.format() @@ -2314,14 +2307,6 @@ def test_map(self): exp = [f(x) for x in rng] self.assert_numpy_array_equal(result, exp) - def test_add_union(self): - rng = date_range('1/1/2000', periods=5) - rng2 = date_range('1/6/2000', periods=5) - - result = rng + rng2 - expected = rng.union(rng2) - self.assertTrue(result.equals(expected)) - def test_misc_coverage(self): rng = date_range('1/1/2000', periods=5) result = rng.groupby(rng.day)
Fixes 2 issues related to `DetetimeIndex` and `PeriodIndex` ops. - Addition / subtraction between `PeriodIndex` raise `TypeError` (Closes #3367). ``` pidx + pidx # TypeError: unsupported operand type(s) for +: 'PeriodIndex' and 'PeriodIndex' ``` - In-place addition / subtraction doesn't return the same result as normal addition / subtraction. Specifically, `PeriodIndex` in-place operation results in `Int64Index` (Closes #6527) ``` didx = pd.date_range('2011-01-01', freq='D', periods=5) # This results shift (expected) didx + 1 # <class 'pandas.tseries.index.DatetimeIndex'> # [2011-01-02, ..., 2011-01-06] # Length: 5, Freq: D, Timezone: None # This result adding 1 unit (nano second) (NG) didx += 1 didx # <class 'pandas.tseries.index.DatetimeIndex'> # [2011-01-01 00:00:00.000000001, ..., 2011-01-05 00:00:00.000000001] # Length: 5, Freq: None, Timezone: None ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7741
2014-07-13T00:14:25Z
2014-07-23T21:28:50Z
2014-07-23T21:28:50Z
2014-07-25T20:42:29Z
BUG: _flex_binary_moment() doesn't preserve column order or handle multiple columns with the same label
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 1c05c01633b15..da96d1e359454 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -265,7 +265,7 @@ Bug Fixes -- Bug in repeated timeseries line and area plot may result in ``ValueError`` or incorrect kind (:issue:`7733`) +- Bug in repeated timeseries line and area plot may result in ``ValueError`` or incorrect kind (:issue:`7733`) @@ -278,7 +278,10 @@ Bug Fixes - Bug in ``DataFrame.plot`` with ``subplots=True`` may draw unnecessary minor xticks and yticks (:issue:`7801`) - Bug in ``StataReader`` which did not read variable labels in 117 files due to difference between Stata documentation and implementation (:issue:`7816`) - +- Bug in ``expanding_cov``, ``expanding_corr``, ``rolling_cov``, ``rolling_cov``, ``ewmcov``, and ``ewmcorr`` + returning results with columns sorted by name and producing an error for non-unique columns; + now handles non-unique columns and returns columns in original order + (except for the case of two DataFrames with ``pairwise=False``, where behavior is unchanged) (:issue:`7542`) diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index 6f06255c7262d..a62d8178385cc 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -259,38 +259,55 @@ def _flex_binary_moment(arg1, arg2, f, pairwise=False): isinstance(arg2, (np.ndarray,Series)): X, Y = _prep_binary(arg1, arg2) return f(X, Y) + elif isinstance(arg1, DataFrame): + def dataframe_from_int_dict(data, frame_template): + result = DataFrame(data, index=frame_template.index) + result.columns = frame_template.columns[result.columns] + return result + results = {} if isinstance(arg2, DataFrame): - X, Y = arg1.align(arg2, join='outer') if pairwise is False: - X = X + 0 * Y - Y = Y + 0 * X - res_columns = arg1.columns.union(arg2.columns) - for col in res_columns: - if col in X and col in Y: - results[col] = f(X[col], Y[col]) + if arg1 is arg2: + # special case in order to handle duplicate column 
names + for i, col in enumerate(arg1.columns): + results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) + return dataframe_from_int_dict(results, arg1) + else: + if not arg1.columns.is_unique: + raise ValueError("'arg1' columns are not unique") + if not arg2.columns.is_unique: + raise ValueError("'arg2' columns are not unique") + X, Y = arg1.align(arg2, join='outer') + X = X + 0 * Y + Y = Y + 0 * X + res_columns = arg1.columns.union(arg2.columns) + for col in res_columns: + if col in X and col in Y: + results[col] = f(X[col], Y[col]) + return DataFrame(results, index=X.index, columns=res_columns) elif pairwise is True: results = defaultdict(dict) for i, k1 in enumerate(arg1.columns): for j, k2 in enumerate(arg2.columns): if j<i and arg2 is arg1: # Symmetric case - results[k1][k2] = results[k2][k1] + results[i][j] = results[j][i] else: - results[k1][k2] = f(*_prep_binary(arg1[k1], arg2[k2])) - return Panel.from_dict(results).swapaxes('items', 'major') + results[i][j] = f(*_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j])) + p = Panel.from_dict(results).swapaxes('items', 'major') + p.major_axis = arg1.columns[p.major_axis] + p.minor_axis = arg2.columns[p.minor_axis] + return p else: raise ValueError("'pairwise' is not True/False") else: - res_columns = arg1.columns - X, Y = arg1.align(arg2, axis=0, join='outer') results = {} + for i, col in enumerate(arg1.columns): + results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2)) + return dataframe_from_int_dict(results, arg1) - for col in res_columns: - results[col] = f(X[col], Y) - - return DataFrame(results, index=X.index, columns=res_columns) else: return _flex_binary_moment(arg2, arg1, f) diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py index 7124eaf6fb797..4b5bb042e1fc7 100644 --- a/pandas/stats/tests/test_moments.py +++ b/pandas/stats/tests/test_moments.py @@ -8,7 +8,7 @@ from pandas import Series, DataFrame, Panel, bdate_range, isnull, notnull from pandas.util.testing import ( - 
assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal + assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal, assert_index_equal ) import pandas.core.datetools as datetools import pandas.stats.moments as mom @@ -970,6 +970,119 @@ def test_expanding_corr_pairwise_diff_length(self): assert_frame_equal(result2, expected) assert_frame_equal(result3, expected) assert_frame_equal(result4, expected) + + def test_pairwise_stats_column_names_order(self): + # GH 7738 + df1s = [DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0,1]), + DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,0]), + DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1,1]), + DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C','C']), + DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[1.,0]), + DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=[0.,1]), + DataFrame([[2,4],[1,2],[5,2],[8,1]], columns=['C',1]), + DataFrame([[2.,4.],[1.,2.],[5.,2.],[8.,1.]], columns=[1,0.]), + DataFrame([[2,4.],[1,2.],[5,2.],[8,1.]], columns=[0,1.]), + DataFrame([[2,4],[1,2],[5,2],[8,1.]], columns=[1.,'X']), + ] + df2 = DataFrame([[None,1,1],[None,1,2],[None,3,2],[None,8,1]], columns=['Y','Z','X']) + s = Series([1,1,3,8]) + + # DataFrame methods (which do not call _flex_binary_moment()) + for f in [lambda x: x.cov(), + lambda x: x.corr(), + ]: + results = [f(df) for df in df1s] + for (df, result) in zip(df1s, results): + assert_index_equal(result.index, df.columns) + assert_index_equal(result.columns, df.columns) + for i, result in enumerate(results): + if i > 0: + self.assert_numpy_array_equivalent(result, results[0]) + + # DataFrame with itself, pairwise=True + for f in [lambda x: mom.expanding_cov(x, pairwise=True), + lambda x: mom.expanding_corr(x, pairwise=True), + lambda x: mom.rolling_cov(x, window=3, pairwise=True), + lambda x: mom.rolling_corr(x, window=3, pairwise=True), + lambda x: mom.ewmcov(x, com=3, pairwise=True), + lambda x: mom.ewmcorr(x, com=3, pairwise=True), + ]: + 
results = [f(df) for df in df1s] + for (df, result) in zip(df1s, results): + assert_index_equal(result.items, df.index) + assert_index_equal(result.major_axis, df.columns) + assert_index_equal(result.minor_axis, df.columns) + for i, result in enumerate(results): + if i > 0: + self.assert_numpy_array_equivalent(result, results[0]) + + # DataFrame with itself, pairwise=False + for f in [lambda x: mom.expanding_cov(x, pairwise=False), + lambda x: mom.expanding_corr(x, pairwise=False), + lambda x: mom.rolling_cov(x, window=3, pairwise=False), + lambda x: mom.rolling_corr(x, window=3, pairwise=False), + lambda x: mom.ewmcov(x, com=3, pairwise=False), + lambda x: mom.ewmcorr(x, com=3, pairwise=False), + ]: + results = [f(df) for df in df1s] + for (df, result) in zip(df1s, results): + assert_index_equal(result.index, df.index) + assert_index_equal(result.columns, df.columns) + for i, result in enumerate(results): + if i > 0: + self.assert_numpy_array_equivalent(result, results[0]) + + # DataFrame with another DataFrame, pairwise=True + for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=True), + lambda x, y: mom.expanding_corr(x, y, pairwise=True), + lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=True), + lambda x, y: mom.rolling_corr(x, y, window=3, pairwise=True), + lambda x, y: mom.ewmcov(x, y, com=3, pairwise=True), + lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=True), + ]: + results = [f(df, df2) for df in df1s] + for (df, result) in zip(df1s, results): + assert_index_equal(result.items, df.index) + assert_index_equal(result.major_axis, df.columns) + assert_index_equal(result.minor_axis, df2.columns) + for i, result in enumerate(results): + if i > 0: + self.assert_numpy_array_equivalent(result, results[0]) + + # DataFrame with another DataFrame, pairwise=False + for f in [lambda x, y: mom.expanding_cov(x, y, pairwise=False), + lambda x, y: mom.expanding_corr(x, y, pairwise=False), + lambda x, y: mom.rolling_cov(x, y, window=3, pairwise=False), + lambda 
x, y: mom.rolling_corr(x, y, window=3, pairwise=False), + lambda x, y: mom.ewmcov(x, y, com=3, pairwise=False), + lambda x, y: mom.ewmcorr(x, y, com=3, pairwise=False), + ]: + results = [f(df, df2) if df.columns.is_unique else None for df in df1s] + for (df, result) in zip(df1s, results): + if result is not None: + expected_index = df.index.union(df2.index) + expected_columns = df.columns.union(df2.columns) + assert_index_equal(result.index, expected_index) + assert_index_equal(result.columns, expected_columns) + else: + tm.assertRaisesRegexp(ValueError, "'arg1' columns are not unique", f, df, df2) + tm.assertRaisesRegexp(ValueError, "'arg2' columns are not unique", f, df2, df) + + # DataFrame with a Series + for f in [lambda x, y: mom.expanding_cov(x, y), + lambda x, y: mom.expanding_corr(x, y), + lambda x, y: mom.rolling_cov(x, y, window=3), + lambda x, y: mom.rolling_corr(x, y, window=3), + lambda x, y: mom.ewmcov(x, y, com=3), + lambda x, y: mom.ewmcorr(x, y, com=3), + ]: + results = [f(df, s) for df in df1s] + [f(s, df) for df in df1s] + for (df, result) in zip(df1s, results): + assert_index_equal(result.index, df.index) + assert_index_equal(result.columns, df.columns) + for i, result in enumerate(results): + if i > 0: + self.assert_numpy_array_equivalent(result, results[0]) def test_rolling_skew_edge_cases(self):
Closes https://github.com/pydata/pandas/issues/7542.
https://api.github.com/repos/pandas-dev/pandas/pulls/7738
2014-07-12T18:57:33Z
2014-07-25T14:34:07Z
2014-07-25T14:34:07Z
2014-09-10T00:12:39Z
ENH: Use left._constructor on pd.merge
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 04fb0b0695f8f..55bbf613b33cf 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -376,6 +376,10 @@ Here's a description of what each argument is for: can be avoided are somewhat pathological but this option is provided nonetheless. +The return type will be the same as ``left``. If ``left`` is a ``DataFrame`` +and ``right`` is a subclass of DataFrame, the return type will still be +``DataFrame``. + ``merge`` is a function in the pandas namespace, and it is also available as a DataFrame instance method, with the calling DataFrame being implicitly considered the left object in the join. diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 148cf85d0b5ab..7a9ba2ed6e53d 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -166,6 +166,9 @@ previously results in ``Exception`` or ``TypeError`` (:issue:`7812`) - ``DataFrame.tz_localize`` and ``DataFrame.tz_convert`` now accepts an optional ``level`` argument for localizing a specific level of a MultiIndex (:issue:`7846`) +- ``merge``, ``DataFrame.merge``, and ``ordered_merge`` now return the same type + as the ``left`` argument. (:issue:`7737`) + .. _whatsnew_0150.dt: .dt accessor diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3979ae76f14c3..352ac52281c54 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -135,6 +135,8 @@ Returns ------- merged : DataFrame + The output type will the be same as 'left', if it is a subclass + of DataFrame. """ #---------------------------------------------------------------------- diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index ee594ef031e82..3a5c191148fe6 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -106,6 +106,8 @@ def ordered_merge(left, right, on=None, left_by=None, right_by=None, Returns ------- merged : DataFrame + The output type will the be same as 'left', if it is a subclass + of DataFrame. 
""" def _merger(x, y): op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on, @@ -198,7 +200,8 @@ def get_result(self): axes=[llabels.append(rlabels), join_index], concat_axis=0, copy=self.copy) - result = DataFrame(result_data).__finalize__(self, method='merge') + typ = self.left._constructor + result = typ(result_data).__finalize__(self, method='merge') self._maybe_add_join_keys(result, left_indexer, right_indexer) @@ -520,7 +523,8 @@ def get_result(self): axes=[llabels.append(rlabels), join_index], concat_axis=0, copy=self.copy) - result = DataFrame(result_data) + typ = self.left._constructor + result = typ(result_data).__finalize__(self, method='ordered_merge') self._maybe_add_join_keys(result, left_indexer, right_indexer) diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py index df2f270346e20..6985da233ed58 100644 --- a/pandas/tools/tests/test_merge.py +++ b/pandas/tools/tests/test_merge.py @@ -781,6 +781,16 @@ def test_merge_nan_right(self): 1: nan}})[['i1', 'i2', 'i1_', 'i3']] assert_frame_equal(result, expected) + def test_merge_type(self): + class NotADataFrame(DataFrame): + @property + def _constructor(self): + return NotADataFrame + + nad = NotADataFrame(self.df) + result = nad.merge(self.df2, on='key1') + + tm.assert_isinstance(result, NotADataFrame) def test_append_dtype_coerce(self): @@ -2154,6 +2164,18 @@ def test_multigroup(self): result = ordered_merge(left, self.right, on='key', left_by='group') self.assertTrue(result['group'].notnull().all()) + def test_merge_type(self): + class NotADataFrame(DataFrame): + @property + def _constructor(self): + return NotADataFrame + + nad = NotADataFrame(self.left) + result = nad.merge(self.right, on='key') + + tm.assert_isinstance(result, NotADataFrame) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
Use the _constructor property when creating the merge result to preserve the output type. If a [GeoPandas](http://github.com/geopandas/geopandas) `GeoDataFrame` is merged with a `DataFrame`, the result is hard-coded to always be `DataFrame` [GeoPandas Issue #118](https://github.com/geopandas/geopandas/issues/118). We'd like it to return `GeoDataFrame` in these cases ``` >>> import geopandas as gpd >>> import pandas as pd >>> gdf = gpd.GeoDataFrame(...) >>> df = pd.DataFrame(...) >>> merged = pd.merge(gdf, df, on='column') >>> type(merged) GeoDataFrame ``` This PR uses `left._constructor` to generate the result type for merge operations.
https://api.github.com/repos/pandas-dev/pandas/pulls/7737
2014-07-12T16:53:33Z
2014-08-11T13:04:16Z
2014-08-11T13:04:16Z
2014-08-11T13:57:14Z
ENH: plot functions accept multiple axes and layout kw
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index d15a48535f1eb..bbf665b574409 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -303,6 +303,9 @@ Enhancements ~~~~~~~~~~~~ - Added support for bool, uint8, uint16 and uint32 datatypes in ``to_stata`` (:issue:`7097`, :issue:`7365`) +- Added ``layout`` keyword to ``DataFrame.plot`` (:issue:`6667`) +- Allow to pass multiple axes to ``DataFrame.plot``, ``hist`` and ``boxplot`` (:issue:`5353`, :issue:`6970`, :issue:`7069`) + - ``PeriodIndex`` supports ``resolution`` as the same as ``DatetimeIndex`` (:issue:`7708`) - ``pandas.tseries.holiday`` has added support for additional holidays and ways to observe holidays (:issue:`7070`) diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 40b5d7c1599c1..e8d3d147479c2 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -946,10 +946,41 @@ with the ``subplots`` keyword: @savefig frame_plot_subplots.png df.plot(subplots=True, figsize=(6, 6)); -Targeting Different Subplots -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Using Layout and Targetting Multiple Axes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can pass an ``ax`` argument to :meth:`Series.plot` to plot on a particular axis: +The layout of subplots can be specified by ``layout`` keyword. It can accept +``(rows, columns)``. The ``layout`` keyword can be used in +``hist`` and ``boxplot`` also. If input is invalid, ``ValueError`` will be raised. + +The number of axes which can be contained by rows x columns specified by ``layout`` must be +larger than the number of required subplots. If layout can contain more axes than required, +blank axes are not drawn. + +.. ipython:: python + + @savefig frame_plot_subplots_layout.png + df.plot(subplots=True, layout=(2, 3), figsize=(6, 6)); + +Also, you can pass multiple axes created beforehand as list-like via ``ax`` keyword. +This allows to use more complicated layout. 
+The passed axes must be the same number as the subplots being drawn. + +When multiple axes are passed via ``ax`` keyword, ``layout``, ``sharex`` and ``sharey`` keywords are ignored. +These must be configured when creating axes. + +.. ipython:: python + + fig, axes = plt.subplots(4, 4, figsize=(6, 6)); + plt.adjust_subplots(wspace=0.5, hspace=0.5); + target1 = [axes[0][0], axes[1][1], axes[2][2], axes[3][3]] + target2 = [axes[3][0], axes[2][1], axes[1][2], axes[0][3]] + + df.plot(subplots=True, ax=target1, legend=False); + @savefig frame_plot_subplots_multi_ax.png + (-df).plot(subplots=True, ax=target2, legend=False); + +Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a particular axis: .. ipython:: python :suppress: @@ -964,12 +995,12 @@ You can pass an ``ax`` argument to :meth:`Series.plot` to plot on a particular a .. ipython:: python fig, axes = plt.subplots(nrows=2, ncols=2) - df['A'].plot(ax=axes[0,0]); axes[0,0].set_title('A') - df['B'].plot(ax=axes[0,1]); axes[0,1].set_title('B') - df['C'].plot(ax=axes[1,0]); axes[1,0].set_title('C') + df['A'].plot(ax=axes[0,0]); axes[0,0].set_title('A'); + df['B'].plot(ax=axes[0,1]); axes[0,1].set_title('B'); + df['C'].plot(ax=axes[1,0]); axes[1,0].set_title('C'); @savefig series_plot_multi.png - df['D'].plot(ax=axes[1,1]); axes[1,1].set_title('D') + df['D'].plot(ax=axes[1,1]); axes[1,1].set_title('D'); .. 
ipython:: python :suppress: diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index b3a92263370e8..1560b78a2f5e0 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -670,7 +670,7 @@ def test_hist_layout_with_by(self): axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2)) self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) - axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 2), figsize=(12, 7)) + axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7)) self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7)) @slow @@ -1071,6 +1071,7 @@ def test_subplots(self): for kind in ['bar', 'barh', 'line', 'area']: axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True) self._check_axes_shape(axes, axes_num=3, layout=(3, 1)) + self.assertEqual(axes.shape, (3, )) for ax, column in zip(axes, df.columns): self._check_legend_labels(ax, labels=[com.pprint_thing(column)]) @@ -1133,6 +1134,77 @@ def test_subplots_timeseries(self): self._check_visible(ax.get_yticklabels()) self._check_ticks_props(ax, xlabelsize=7, xrot=45) + def test_subplots_layout(self): + # GH 6667 + df = DataFrame(np.random.rand(10, 3), + index=list(string.ascii_letters[:10])) + + axes = df.plot(subplots=True, layout=(2, 2)) + self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) + self.assertEqual(axes.shape, (2, 2)) + + axes = df.plot(subplots=True, layout=(1, 4)) + self._check_axes_shape(axes, axes_num=3, layout=(1, 4)) + self.assertEqual(axes.shape, (1, 4)) + + with tm.assertRaises(ValueError): + axes = df.plot(subplots=True, layout=(1, 1)) + + # single column + df = DataFrame(np.random.rand(10, 1), + index=list(string.ascii_letters[:10])) + axes = df.plot(subplots=True) + self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) + self.assertEqual(axes.shape, (1, )) + + axes = df.plot(subplots=True, layout=(3, 3)) + self._check_axes_shape(axes, axes_num=1, layout=(3, 3)) + 
self.assertEqual(axes.shape, (3, 3)) + + @slow + def test_subplots_multiple_axes(self): + # GH 5353, 6970, GH 7069 + fig, axes = self.plt.subplots(2, 3) + df = DataFrame(np.random.rand(10, 3), + index=list(string.ascii_letters[:10])) + + returned = df.plot(subplots=True, ax=axes[0]) + self._check_axes_shape(returned, axes_num=3, layout=(1, 3)) + self.assertEqual(returned.shape, (3, )) + self.assertIs(returned[0].figure, fig) + # draw on second row + returned = df.plot(subplots=True, ax=axes[1]) + self._check_axes_shape(returned, axes_num=3, layout=(1, 3)) + self.assertEqual(returned.shape, (3, )) + self.assertIs(returned[0].figure, fig) + self._check_axes_shape(axes, axes_num=6, layout=(2, 3)) + tm.close() + + with tm.assertRaises(ValueError): + fig, axes = self.plt.subplots(2, 3) + # pass different number of axes from required + df.plot(subplots=True, ax=axes) + + # pass 2-dim axes and invalid layout + # invalid lauout should not affect to input and return value + # (show warning is tested in + # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes + fig, axes = self.plt.subplots(2, 2) + df = DataFrame(np.random.rand(10, 4), + index=list(string.ascii_letters[:10])) + + returned = df.plot(subplots=True, ax=axes, layout=(2, 1)) + self._check_axes_shape(returned, axes_num=4, layout=(2, 2)) + self.assertEqual(returned.shape, (4, )) + + # single column + fig, axes = self.plt.subplots(1, 1) + df = DataFrame(np.random.rand(10, 1), + index=list(string.ascii_letters[:10])) + axes = df.plot(subplots=True, ax=[axes]) + self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) + self.assertEqual(axes.shape, (1, )) + def test_negative_log(self): df = - DataFrame(rand(6, 4), index=list(string.ascii_letters[:6]), @@ -1718,7 +1790,7 @@ def test_hist_df_coord(self): normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]), np.array([10, 9, 8, 7, 6])), 'B': np.repeat(np.array([1, 2, 3, 4, 5]), - np.array([8, 8, 8, 8, 8])), + np.array([8, 8, 8, 8, 8])), 'C': 
np.repeat(np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9, 10]))}, columns=['A', 'B', 'C']) @@ -1726,7 +1798,7 @@ def test_hist_df_coord(self): nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]), np.array([3, 10, 9, 8, 7, 6])), 'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]), - np.array([8, 3, 8, 8, 8, 8])), + np.array([8, 3, 8, 8, 8, 8])), 'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]), np.array([6, 7, 8, 3, 9, 10]))}, columns=['A', 'B', 'C']) @@ -2712,6 +2784,41 @@ def test_grouped_box_layout(self): return_type='dict') self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4)) + @slow + def test_grouped_box_multiple_axes(self): + # GH 6970, GH 7069 + df = self.hist_df + + # check warning to ignore sharex / sharey + # this check should be done in the first function which + # passes multiple axes to plot, hist or boxplot + # location should be changed if other test is added + # which has earlier alphabetical order + with tm.assert_produces_warning(UserWarning): + fig, axes = self.plt.subplots(2, 2) + df.groupby('category').boxplot(column='height', return_type='axes', ax=axes) + self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2)) + + fig, axes = self.plt.subplots(2, 3) + returned = df.boxplot(column=['height', 'weight', 'category'], by='gender', + return_type='axes', ax=axes[0]) + returned = np.array(returned.values()) + self._check_axes_shape(returned, axes_num=3, layout=(1, 3)) + self.assert_numpy_array_equal(returned, axes[0]) + self.assertIs(returned[0].figure, fig) + # draw on second row + returned = df.groupby('classroom').boxplot(column=['height', 'weight', 'category'], + return_type='axes', ax=axes[1]) + returned = np.array(returned.values()) + self._check_axes_shape(returned, axes_num=3, layout=(1, 3)) + self.assert_numpy_array_equal(returned, axes[1]) + self.assertIs(returned[0].figure, fig) + + with tm.assertRaises(ValueError): + fig, axes = self.plt.subplots(2, 3) + # pass different number of axes from 
required + axes = df.groupby('classroom').boxplot(ax=axes) + @slow def test_grouped_hist_layout(self): @@ -2724,12 +2831,12 @@ def test_grouped_hist_layout(self): axes = _check_plot_works(df.hist, column='height', by=df.gender, layout=(2, 1)) self._check_axes_shape(axes, axes_num=2, layout=(2, 1)) - axes = _check_plot_works(df.hist, column='height', by=df.category, layout=(4, 1)) + axes = df.hist(column='height', by=df.category, layout=(4, 1)) self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) - axes = _check_plot_works(df.hist, column='height', by=df.category, - layout=(4, 2), figsize=(12, 8)) + axes = df.hist(column='height', by=df.category, layout=(4, 2), figsize=(12, 8)) self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 8)) + tm.close() # GH 6769 axes = _check_plot_works(df.hist, column='height', by='classroom', layout=(2, 2)) @@ -2739,13 +2846,32 @@ def test_grouped_hist_layout(self): axes = _check_plot_works(df.hist, by='classroom') self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) - axes = _check_plot_works(df.hist, by='gender', layout=(3, 5)) + axes = df.hist(by='gender', layout=(3, 5)) self._check_axes_shape(axes, axes_num=2, layout=(3, 5)) - axes = _check_plot_works(df.hist, column=['height', 'weight', 'category']) + axes = df.hist(column=['height', 'weight', 'category']) self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) @slow + def test_grouped_hist_multiple_axes(self): + # GH 6970, GH 7069 + df = self.hist_df + + fig, axes = self.plt.subplots(2, 3) + returned = df.hist(column=['height', 'weight', 'category'], ax=axes[0]) + self._check_axes_shape(returned, axes_num=3, layout=(1, 3)) + self.assert_numpy_array_equal(returned, axes[0]) + self.assertIs(returned[0].figure, fig) + returned = df.hist(by='classroom', ax=axes[1]) + self._check_axes_shape(returned, axes_num=3, layout=(1, 3)) + self.assert_numpy_array_equal(returned, axes[1]) + self.assertIs(returned[0].figure, fig) + + with tm.assertRaises(ValueError): + fig, 
axes = self.plt.subplots(2, 3) + # pass different number of axes from required + axes = df.hist(column='height', ax=axes) + @slow def test_axis_share_x(self): df = self.hist_df # GH4089 diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 7d0eaea5b36d6..18fc2bead02ec 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -246,7 +246,8 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False, df = frame._get_numeric_data() n = df.columns.size - fig, axes = _subplots(nrows=n, ncols=n, figsize=figsize, ax=ax, + naxes = n * n + fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False) # no gaps between subplots @@ -752,6 +753,7 @@ class MPLPlot(object): data : """ + _layout_type = 'vertical' _default_rot = 0 orientation = None @@ -767,7 +769,7 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True, xticks=None, yticks=None, sort_columns=False, fontsize=None, secondary_y=False, colormap=None, - table=False, **kwds): + table=False, layout=None, **kwds): self.data = data self.by = by @@ -780,6 +782,7 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True, self.sharex = sharex self.sharey = sharey self.figsize = figsize + self.layout = layout self.xticks = xticks self.yticks = yticks @@ -932,22 +935,22 @@ def _maybe_right_yaxis(self, ax): def _setup_subplots(self): if self.subplots: - nrows, ncols = self._get_layout() - fig, axes = _subplots(nrows=nrows, ncols=ncols, + fig, axes = _subplots(naxes=self.nseries, sharex=self.sharex, sharey=self.sharey, - figsize=self.figsize, ax=self.ax) - if not com.is_list_like(axes): - axes = np.array([axes]) + figsize=self.figsize, ax=self.ax, + layout=self.layout, + layout_type=self._layout_type) else: if self.ax is None: fig = self.plt.figure(figsize=self.figsize) - ax = fig.add_subplot(111) + axes = fig.add_subplot(111) else: fig = self.ax.get_figure() if self.figsize is not None: fig.set_size_inches(self.figsize) - ax = 
self.ax - axes = [ax] + axes = self.ax + + axes = _flatten(axes) if self.logx or self.loglog: [a.set_xscale('log') for a in axes] @@ -957,12 +960,18 @@ def _setup_subplots(self): self.fig = fig self.axes = axes - def _get_layout(self): - from pandas.core.frame import DataFrame - if isinstance(self.data, DataFrame): - return (len(self.data.columns), 1) + @property + def result(self): + """ + Return result axes + """ + if self.subplots: + if self.layout is not None and not com.is_list_like(self.ax): + return self.axes.reshape(*self.layout) + else: + return self.axes else: - return (1, 1) + return self.axes[0] def _compute_plot_data(self): numeric_data = self.data.convert_objects()._get_numeric_data() @@ -1360,6 +1369,8 @@ def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True): class ScatterPlot(MPLPlot): + _layout_type = 'single' + def __init__(self, data, x, y, **kwargs): MPLPlot.__init__(self, data, **kwargs) self.kwds.setdefault('c', self.plt.rcParams['patch.facecolor']) @@ -1372,8 +1383,9 @@ def __init__(self, data, x, y, **kwargs): self.x = x self.y = y - def _get_layout(self): - return (1, 1) + @property + def nseries(self): + return 1 def _make_plot(self): x, y, data = self.x, self.y, self.data @@ -1404,6 +1416,8 @@ def _post_plot_logic(self): class HexBinPlot(MPLPlot): + _layout_type = 'single' + def __init__(self, data, x, y, C=None, **kwargs): MPLPlot.__init__(self, data, **kwargs) @@ -1421,8 +1435,9 @@ def __init__(self, data, x, y, C=None, **kwargs): self.y = y self.C = C - def _get_layout(self): - return (1, 1) + @property + def nseries(self): + return 1 def _make_plot(self): import matplotlib.pyplot as plt @@ -1966,6 +1981,8 @@ def _post_plot_logic(self): class PiePlot(MPLPlot): + _layout_type = 'horizontal' + def __init__(self, data, kind=None, **kwargs): data = data.fillna(value=0) if (data < 0).any().any(): @@ -1978,13 +1995,6 @@ def _args_adjust(self): self.logx = False self.loglog = False - def _get_layout(self): - from pandas 
import DataFrame - if isinstance(self.data, DataFrame): - return (1, len(self.data.columns)) - else: - return (1, 1) - def _validate_color_args(self): pass @@ -2044,7 +2054,7 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, legend=True, rot=None, ax=None, style=None, title=None, xlim=None, ylim=None, logx=False, logy=False, xticks=None, yticks=None, kind='line', sort_columns=False, fontsize=None, - secondary_y=False, **kwds): + secondary_y=False, layout=None, **kwds): """ Make line, bar, or scatter plots of DataFrame series with the index on the x-axis @@ -2116,6 +2126,8 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, position : float Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) + layout : tuple (optional) + (rows, columns) for the layout of the plot table : boolean, Series or DataFrame, default False If True, draw a table using the data in the DataFrame and the data will be transposed to meet matplotlib's default layout. 
@@ -2153,7 +2165,7 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, xlim=xlim, ylim=ylim, title=title, grid=grid, figsize=figsize, logx=logx, logy=logy, sort_columns=sort_columns, secondary_y=secondary_y, - **kwds) + layout=layout, **kwds) elif kind in _series_kinds: if y is None and subplots is False: msg = "{0} requires either y column or 'subplots=True'" @@ -2169,9 +2181,8 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, fontsize=fontsize, use_index=use_index, sharex=sharex, sharey=sharey, xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, title=title, grid=grid, - figsize=figsize, - sort_columns=sort_columns, - **kwds) + figsize=figsize, layout=layout, + sort_columns=sort_columns, **kwds) else: if x is not None: if com.is_integer(x) and not frame.columns.holds_integer(): @@ -2209,14 +2220,11 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, title=title, grid=grid, figsize=figsize, logx=logx, logy=logy, sort_columns=sort_columns, - secondary_y=secondary_y, **kwds) + secondary_y=secondary_y, layout=layout, **kwds) plot_obj.generate() plot_obj.draw() - if subplots: - return plot_obj.axes - else: - return plot_obj.axes[0] + return plot_obj.result def plot_series(series, label=None, kind='line', use_index=True, rot=None, @@ -2311,7 +2319,7 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None, plot_obj.draw() # plot_obj.ax is None if we created the first figure - return plot_obj.axes[0] + return plot_obj.result _shared_docs['boxplot'] = """ @@ -2551,12 +2559,13 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, data = data._get_numeric_data() naxes = len(data.columns) - nrows, ncols = _get_layout(naxes, layout=layout) - fig, axes = _subplots(nrows=nrows, ncols=ncols, naxes=naxes, ax=ax, squeeze=False, - sharex=sharex, sharey=sharey, figsize=figsize) + fig, axes = _subplots(naxes=naxes, ax=ax, 
squeeze=False, + sharex=sharex, sharey=sharey, figsize=figsize, + layout=layout) + _axes = _flatten(axes) for i, col in enumerate(com._try_sort(data.columns)): - ax = axes[i // ncols, i % ncols] + ax = _axes[i] ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) ax.grid(grid) @@ -2672,7 +2681,7 @@ def plot_group(group, ax): xrot = xrot or rot fig, axes = _grouped_plot(plot_group, data, column=column, - by=by, sharex=sharex, sharey=sharey, + by=by, sharex=sharex, sharey=sharey, ax=ax, figsize=figsize, layout=layout, rot=rot) _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, @@ -2730,9 +2739,9 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, """ if subplots is True: naxes = len(grouped) - nrows, ncols = _get_layout(naxes, layout=layout) - fig, axes = _subplots(nrows=nrows, ncols=ncols, naxes=naxes, squeeze=False, - ax=ax, sharex=False, sharey=True, figsize=figsize) + fig, axes = _subplots(naxes=naxes, squeeze=False, + ax=ax, sharex=False, sharey=True, figsize=figsize, + layout=layout) axes = _flatten(axes) ret = compat.OrderedDict() @@ -2773,14 +2782,14 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True, grouped = grouped[column] naxes = len(grouped) - nrows, ncols = _get_layout(naxes, layout=layout) - fig, axes = _subplots(nrows=nrows, ncols=ncols, naxes=naxes, - figsize=figsize, sharex=sharex, sharey=sharey, ax=ax) + fig, axes = _subplots(naxes=naxes, figsize=figsize, + sharex=sharex, sharey=sharey, ax=ax, + layout=layout) - ravel_axes = _flatten(axes) + _axes = _flatten(axes) for i, (key, group) in enumerate(grouped): - ax = ravel_axes[i] + ax = _axes[i] if numeric_only and isinstance(group, DataFrame): group = group._get_numeric_data() plotf(group, ax, **kwargs) @@ -2799,16 +2808,14 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, by = [by] columns = data._get_numeric_data().columns - by naxes = len(columns) - nrows, ncols = _get_layout(naxes, layout=layout) - fig, 
axes = _subplots(nrows=nrows, ncols=ncols, naxes=naxes, - sharex=True, sharey=True, - figsize=figsize, ax=ax) + fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True, + figsize=figsize, ax=ax, layout=layout) - ravel_axes = _flatten(axes) + _axes = _flatten(axes) result = compat.OrderedDict() for i, col in enumerate(columns): - ax = ravel_axes[i] + ax = _axes[i] gp_col = grouped[col] keys, values = zip(*gp_col) re_plotf = plotf(keys, values, ax, **kwargs) @@ -2869,7 +2876,7 @@ def table(ax, data, rowLabels=None, colLabels=None, return table -def _get_layout(nplots, layout=None): +def _get_layout(nplots, layout=None, layout_type='box'): if layout is not None: if not isinstance(layout, (tuple, list)) or len(layout) != 2: raise ValueError('Layout must be a tuple of (rows, columns)') @@ -2881,27 +2888,31 @@ def _get_layout(nplots, layout=None): return layout - if nplots == 1: + if layout_type == 'single': return (1, 1) - elif nplots == 2: - return (1, 2) - elif nplots < 4: - return (2, 2) + elif layout_type == 'horizontal': + return (1, nplots) + elif layout_type == 'vertical': + return (nplots, 1) - k = 1 - while k ** 2 < nplots: - k += 1 - - if (k - 1) * k >= nplots: - return k, (k - 1) - else: - return k, k + layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)} + try: + return layouts[nplots] + except KeyError: + k = 1 + while k ** 2 < nplots: + k += 1 + + if (k - 1) * k >= nplots: + return k, (k - 1) + else: + return k, k -# copied from matplotlib/pyplot.py for compatibility with matplotlib < 1.0 +# copied from matplotlib/pyplot.py and modified for pandas.plotting -def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze=True, - subplot_kw=None, ax=None, **fig_kw): +def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True, + subplot_kw=None, ax=None, layout=None, layout_type='box', **fig_kw): """Create a figure with a set of subplots already made. 
This utility wrapper makes it convenient to create common layouts of @@ -2909,12 +2920,6 @@ def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze= Keyword arguments: - nrows : int - Number of rows of the subplot grid. Defaults to 1. - - ncols : int - Number of columns of the subplot grid. Defaults to 1. - naxes : int Number of required axes. Exceeded axes are set invisible. Default is nrows * ncols. @@ -2942,11 +2947,17 @@ def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze= ax : Matplotlib axis object, optional + layout : tuple + Number of rows and columns of the subplot grid. + If not specified, calculated from naxes and layout_type + + layout_type : {'box', 'horziontal', 'vertical'}, default 'box' + Specify how to layout the subplot grid. + fig_kw : Other keyword arguments to be passed to the figure() call. Note that all keywords not recognized above will be automatically included here. - Returns: fig, ax : tuple @@ -2975,23 +2986,27 @@ def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze= plt.subplots(2, 2, subplot_kw=dict(polar=True)) """ import matplotlib.pyplot as plt - from pandas.core.frame import DataFrame if subplot_kw is None: subplot_kw = {} - # Create empty object array to hold all axes. It's easiest to make it 1-d - # so we can just append subplots upon creation, and then - nplots = nrows * ncols - - if naxes is None: - naxes = nrows * ncols - elif nplots < naxes: - raise ValueError("naxes {0} is larger than layour size defined by nrows * ncols".format(naxes)) - if ax is None: fig = plt.figure(**fig_kw) else: + if com.is_list_like(ax): + ax = _flatten(ax) + if layout is not None: + warnings.warn("When passing multiple axes, layout keyword is ignored", UserWarning) + if sharex or sharey: + warnings.warn("When passing multiple axes, sharex and sharey are ignored." 
+ "These settings must be specified when creating axes", UserWarning) + if len(ax) == naxes: + fig = ax[0].get_figure() + return fig, ax + else: + raise ValueError("The number of passed axes must be {0}, the same as " + "the output plot".format(naxes)) + fig = ax.get_figure() # if ax is passed and a number of subplots is 1, return ax as it is if naxes == 1: @@ -3004,6 +3019,11 @@ def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze= "is being cleared", UserWarning) fig.clear() + nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type) + nplots = nrows * ncols + + # Create empty object array to hold all axes. It's easiest to make it 1-d + # so we can just append subplots upon creation, and then axarr = np.empty(nplots, dtype=object) # Create first subplot separately, so we can share it if requested @@ -3074,10 +3094,10 @@ def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze= def _flatten(axes): if not com.is_list_like(axes): - axes = [axes] + return np.array([axes]) elif isinstance(axes, (np.ndarray, Index)): - axes = axes.ravel() - return axes + return axes.ravel() + return np.array(axes) def _get_all_lines(ax):
- Added `layout` keyword to `plot_frame` (Closes #6667) - Allow to pass multiple axes to `plot_frame`, `hist` and `boxplot` (Closes #5353, Closes #6970, Closes #7069) ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import pandas.util.testing as tm n = 20 gender = tm.choice(['Male', 'Female'], size=n) classroom = tm.choice(['A', 'B', 'C'], size=n) df = pd.DataFrame({'gender': gender, 'classroom': classroom, 'height': np.random.normal(66, 4, size=n), 'weight': np.random.normal(161, 32, size=n), 'category': np.random.randint(4, size=n)}) fig, axes = plt.subplots(6, 3, figsize=(6, 7)) df.boxplot(by='category', column=['height', 'weight', 'category'], ax=axes[0]) df.groupby('classroom').boxplot(column=['height', 'weight', 'category'], ax=axes[1]) df.hist(column=['height', 'weight', 'category'], ax=axes[2]) df.hist(by='classroom', ax=axes[3]) df.plot(subplots=True, ax=axes[4], legend=False) df.plot(kind='pie', subplots=True, ax=axes[5], legend=False) plt.subplots_adjust(hspace=1, bottom=0.05) ``` ### Result ![multiax](https://cloud.githubusercontent.com/assets/1696302/3561467/f1ce4c82-09c2-11e4-8623-08b1bd519ebc.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/7736
2014-07-12T12:51:02Z
2014-08-19T17:09:15Z
2014-08-19T17:09:14Z
2014-09-10T12:10:57Z
BUG: DTI.value_counts doesnt preserve tz
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 0f430e249f1c4..7e0931ca1b745 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -290,6 +290,12 @@ Bug Fixes +- Bug in ``DatetimeIndex.value_counts`` doesn't preserve tz (:issue:`7735`) +- Bug in ``PeriodIndex.value_counts`` results in ``Int64Index`` (:issue:`7735`) + + + + diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index cb6f200b259db..4abb6ed10d6a7 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -197,6 +197,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False, from pandas.core.series import Series from pandas.tools.tile import cut + is_period = getattr(values, 'inferred_type', None) == 'period' values = Series(values).values is_category = com.is_categorical_dtype(values.dtype) @@ -212,11 +213,8 @@ def value_counts(values, sort=True, ascending=False, normalize=False, values = cat.codes dtype = values.dtype - if com.is_integer_dtype(dtype): - values = com._ensure_int64(values) - keys, counts = htable.value_count_int64(values) - elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)): + if issubclass(values.dtype.type, (np.datetime64, np.timedelta64)) or is_period: values = values.view(np.int64) keys, counts = htable.value_count_int64(values) @@ -227,6 +225,10 @@ def value_counts(values, sort=True, ascending=False, normalize=False, # convert the keys back to the dtype we came in keys = keys.astype(dtype) + elif com.is_integer_dtype(dtype): + values = com._ensure_int64(values) + keys, counts = htable.value_count_int64(values) + else: values = com._ensure_object(values) mask = com.isnull(values) diff --git a/pandas/core/base.py b/pandas/core/base.py index 243e34e35784a..d55196b56c784 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -275,8 +275,18 @@ def value_counts(self, normalize=False, sort=True, ascending=False, counts : Series """ from pandas.core.algorithms import value_counts - return 
value_counts(self.values, sort=sort, ascending=ascending, - normalize=normalize, bins=bins, dropna=dropna) + from pandas.tseries.api import DatetimeIndex, PeriodIndex + result = value_counts(self, sort=sort, ascending=ascending, + normalize=normalize, bins=bins, dropna=dropna) + + if isinstance(self, PeriodIndex): + # preserve freq + result.index = self._simple_new(result.index.values, self.name, + freq=self.freq) + elif isinstance(self, DatetimeIndex): + result.index = self._simple_new(result.index.values, self.name, + tz=getattr(self, 'tz', None)) + return result def unique(self): """ @@ -542,5 +552,3 @@ def __sub__(self, other): def _add_delta(self, other): return NotImplemented - - diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 1b7db1451f6cf..494c0ee6b2bec 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -267,8 +267,9 @@ def test_value_counts_unique_nunique(self): # skips int64 because it doesn't allow to include nan or None continue - if o.values.dtype == 'datetime64[ns]' and _np_version_under1p7: - # Unable to assign None + if ((isinstance(o, Int64Index) and not isinstance(o, + (DatetimeIndex, PeriodIndex)))): + # skips int64 because it doesn't allow to include nan or None continue # special assign to the numpy array @@ -283,12 +284,8 @@ def test_value_counts_unique_nunique(self): else: o = klass(np.repeat(values, range(1, len(o) + 1))) - if isinstance(o, DatetimeIndex): - expected_s_na = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1]) - expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1]) - else: - expected_s_na = Series(list(range(10, 2, -1)) +[3], index=values[9:0:-1], dtype='int64') - expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1], dtype='int64') + expected_s_na = Series(list(range(10, 2, -1)) +[3], index=values[9:0:-1], dtype='int64') + expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1], dtype='int64') 
tm.assert_series_equal(o.value_counts(dropna=False), expected_s_na) tm.assert_series_equal(o.value_counts(), expected_s) @@ -709,6 +706,28 @@ def test_sub_isub(self): rng -= 1 tm.assert_index_equal(rng, expected) + def test_value_counts(self): + # GH 7735 + for tz in [None, 'UTC', 'Asia/Tokyo', 'US/Eastern']: + idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10) + # create repeated values, 'n'th element is repeated by n+1 times + idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz) + + exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, tz=tz) + expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') + tm.assert_series_equal(idx.value_counts(), expected) + + idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00', + '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz) + + exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], tz=tz) + expected = Series([3, 2], index=exp_idx) + tm.assert_series_equal(idx.value_counts(), expected) + + exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], tz=tz) + expected = Series([3, 2, 1], index=exp_idx) + tm.assert_series_equal(idx.value_counts(dropna=False), expected) + class TestPeriodIndexOps(Ops): _allowed = '_allow_period_index_ops' @@ -968,6 +987,30 @@ def test_sub_isub(self): rng -= 1 tm.assert_index_equal(rng, expected) + def test_value_counts(self): + # GH 7735 + idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10) + # create repeated values, 'n'th element is repeated by n+1 times + idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)), freq='H') + + exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00', '2011-01-01 16:00', + '2011-01-01 15:00', '2011-01-01 14:00', '2011-01-01 13:00', + '2011-01-01 12:00', '2011-01-01 11:00', '2011-01-01 10:00', + '2011-01-01 09:00'], freq='H') + expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') + 
tm.assert_series_equal(idx.value_counts(), expected) + + idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00', + '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], freq='H') + + exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'], freq='H') + expected = Series([3, 2], index=exp_idx) + tm.assert_series_equal(idx.value_counts(), expected) + + exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], freq='H') + expected = Series([3, 2, 1], index=exp_idx) + tm.assert_series_equal(idx.value_counts(dropna=False), expected) + if __name__ == '__main__': import nose
Found 2 problems related to `value_counts`. - `DatetimeIndex.value_counts` loses tz. ``` didx = pd.date_range('2011-01-01 09:00', freq='H', periods=3, tz='Asia/Tokyo') print(didx.value_counts()) #2011-01-01 00:00:00 1 #2011-01-01 01:00:00 1 #2011-01-01 02:00:00 1 # dtype: int64 ``` - `PeriodIndex.value_counts` results in `Int64Index`, and unable to drop `NaT`. ``` pidx = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], freq='H') print(pidx.value_counts()) # 359410 1 # 359409 1 # -9223372036854775808 1 # dtype: int64 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7735
2014-07-12T11:54:36Z
2014-07-25T21:04:51Z
2014-07-25T21:04:51Z
2014-07-26T13:27:25Z
Specify in docs that join='outer' is the defaul for align method.
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index ec8456089f452..4d67616c5cd60 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -869,7 +869,7 @@ Aligning objects with each other with ``align`` The ``align`` method is the fastest way to simultaneously align two objects. It supports a ``join`` argument (related to :ref:`joining and merging <merging>`): - - ``join='outer'``: take the union of the indexes + - ``join='outer'``: take the union of the indexes (default) - ``join='left'``: use the calling object's index - ``join='right'``: use the passed object's index - ``join='inner'``: intersect the indexes
https://api.github.com/repos/pandas-dev/pandas/pulls/7734
2014-07-12T10:27:01Z
2014-07-12T11:00:26Z
2014-07-12T11:00:26Z
2014-07-12T11:00:48Z
BUG: Repeated timeseries plot may result in incorrect kind
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 06c93541a7783..4eebcd4c000a3 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -199,12 +199,18 @@ Bug Fixes - Bug in ``HDFStore.select_column()`` not preserving UTC timezone info when selecting a DatetimeIndex (:issue:`7777`) + - Bug in pickles contains ``DateOffset`` may raise ``AttributeError`` when ``normalize`` attribute is reffered internally (:issue:`7748`) - Bug in pickle deserialization that failed for pre-0.14.1 containers with dup items trying to avoid ambiguity when matching block and manager items, when there's only one block there's no ambiguity (:issue:`7794`) + +- Bug in repeated timeseries line and area plot may result in ``ValueError`` or incorrect kind (:issue:`7733`) + + + - Bug in ``is_superperiod`` and ``is_subperiod`` cannot handle higher frequencies than ``S`` (:issue:`7760`, :issue:`7772`, :issue:`7803`) - Bug in ``DataFrame.reset_index`` which has ``MultiIndex`` contains ``PeriodIndex`` or ``DatetimeIndex`` with tz raises ``ValueError`` (:issue:`7746`, :issue:`7793`) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 9d6391c58e2d5..ea7f963f79f28 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1564,10 +1564,8 @@ def _make_plot(self): label = com.pprint_thing(label) # .encode('utf-8') kwds['label'] = label - y_values = self._get_stacked_values(y, label) - newlines = plotf(ax, x, y_values, style=style, **kwds) - self._update_prior(y) + newlines = plotf(ax, x, y, style=style, column_num=i, **kwds) self._add_legend_handle(newlines[0], label, index=i) lines = _get_all_lines(ax) @@ -1586,6 +1584,18 @@ def _get_stacked_values(self, y, label): else: return y + def _get_plot_function(self): + f = MPLPlot._get_plot_function(self) + def plotf(ax, x, y, style=None, column_num=None, **kwds): + # column_num is used to get the target column from protf in line and area plots + if column_num == 0: + 
self._initialize_prior(len(self.data)) + y_values = self._get_stacked_values(y, kwds['label']) + lines = f(ax, x, y_values, style=style, **kwds) + self._update_prior(y) + return lines + return plotf + def _get_ts_plot_function(self): from pandas.tseries.plotting import tsplot plotf = self._get_plot_function() @@ -1678,11 +1688,13 @@ def _get_plot_function(self): raise ValueError("Log-y scales are not supported in area plot") else: f = MPLPlot._get_plot_function(self) - def plotf(ax, x, y, style=None, **kwds): - lines = f(ax, x, y, style=style, **kwds) + def plotf(ax, x, y, style=None, column_num=0, **kwds): + if column_num == 0: + self._initialize_prior(len(self.data)) + y_values = self._get_stacked_values(y, kwds['label']) + lines = f(ax, x, y_values, style=style, **kwds) - # get data from the line - # insert fill_between starting point + # get data from the line to get coordinates for fill_between xdata, y_values = lines[0].get_data(orig=False) if (y >= 0).all(): @@ -1696,6 +1708,7 @@ def plotf(ax, x, y, style=None, **kwds): kwds['color'] = lines[0].get_color() self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds) + self._update_prior(y) return lines return plotf diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py index 33a14403b0f08..b95553f87ec6b 100644 --- a/pandas/tseries/plotting.py +++ b/pandas/tseries/plotting.py @@ -60,8 +60,7 @@ def tsplot(series, plotf, **kwargs): # how to make sure ax.clear() flows through? 
if not hasattr(ax, '_plot_data'): ax._plot_data = [] - ax._plot_data.append((series, kwargs)) - + ax._plot_data.append((series, plotf, kwargs)) lines = plotf(ax, series.index, series.values, **kwargs) # set date formatter, locators and rescale limits @@ -118,7 +117,7 @@ def _is_sup(f1, f2): def _upsample_others(ax, freq, plotf, kwargs): legend = ax.get_legend() - lines, labels = _replot_ax(ax, freq, plotf, kwargs) + lines, labels = _replot_ax(ax, freq, kwargs) other_ax = None if hasattr(ax, 'left_ax'): @@ -127,7 +126,7 @@ def _upsample_others(ax, freq, plotf, kwargs): other_ax = ax.right_ax if other_ax is not None: - rlines, rlabels = _replot_ax(other_ax, freq, plotf, kwargs) + rlines, rlabels = _replot_ax(other_ax, freq, kwargs) lines.extend(rlines) labels.extend(rlabels) @@ -139,7 +138,7 @@ def _upsample_others(ax, freq, plotf, kwargs): ax.legend(lines, labels, loc='best', title=title) -def _replot_ax(ax, freq, plotf, kwargs): +def _replot_ax(ax, freq, kwargs): data = getattr(ax, '_plot_data', None) ax._plot_data = [] ax.clear() @@ -148,7 +147,7 @@ def _replot_ax(ax, freq, plotf, kwargs): lines = [] labels = [] if data is not None: - for series, kwds in data: + for series, plotf, kwds in data: series = series.copy() idx = series.index.asfreq(freq, how='S') series.index = idx diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index 5742b8e9bfaae..b52dca76f2c77 100644 --- a/pandas/tseries/tests/test_plotting.py +++ b/pandas/tseries/tests/test_plotting.py @@ -704,9 +704,81 @@ def test_from_weekly_resampling(self): low = Series(np.random.randn(len(idxl)), idxl) low.plot() ax = high.plot() + + expected_h = idxh.to_period().asi8 + expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, + 1553, 1558, 1562]) for l in ax.get_lines(): self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + xdata = l.get_xdata(orig=False) + if len(xdata) == 12: # idxl lines + self.assert_numpy_array_equal(xdata, 
expected_l) + else: + self.assert_numpy_array_equal(xdata, expected_h) + + @slow + def test_from_resampling_area_line_mixed(self): + idxh = date_range('1/1/1999', periods=52, freq='W') + idxl = date_range('1/1/1999', periods=12, freq='M') + high = DataFrame(np.random.rand(len(idxh), 3), + index=idxh, columns=[0, 1, 2]) + low = DataFrame(np.random.rand(len(idxl), 3), + index=idxl, columns=[0, 1, 2]) + + # low to high + for kind1, kind2 in [('line', 'area'), ('area', 'line')]: + ax = low.plot(kind=kind1, stacked=True) + ax = high.plot(kind=kind2, stacked=True, ax=ax) + + # check low dataframe result + expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, + 1553, 1558, 1562]) + expected_y = np.zeros(len(expected_x)) + for i in range(3): + l = ax.lines[i] + self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x) + # check stacked values are correct + expected_y += low[i].values + self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y) + + # check high dataframe result + expected_x = idxh.to_period().asi8 + expected_y = np.zeros(len(expected_x)) + for i in range(3): + l = ax.lines[3 + i] + self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x) + expected_y += high[i].values + self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y) + + # high to low + for kind1, kind2 in [('line', 'area'), ('area', 'line')]: + ax = high.plot(kind=kind1, stacked=True) + ax = low.plot(kind=kind2, stacked=True, ax=ax) + + # check high dataframe result + expected_x = idxh.to_period().asi8 + expected_y = np.zeros(len(expected_x)) + for i in range(3): + l = ax.lines[i] + self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x) + expected_y += high[i].values + 
self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y) + + # check low dataframe result + expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, + 1553, 1558, 1562]) + expected_y = np.zeros(len(expected_x)) + for i in range(3): + l = ax.lines[3 + i] + self.assertTrue(PeriodIndex(data=l.get_xdata()).freq.startswith('W')) + self.assert_numpy_array_equal(l.get_xdata(orig=False), expected_x) + expected_y += low[i].values + self.assert_numpy_array_equal(l.get_ydata(orig=False), expected_y) + @slow def test_mixed_freq_second_millisecond(self): # GH 7772, GH 7760
Must be revisited after #7717. Repeated line and area plot may result incorrect if it requires resampling. ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt fig, axes = plt.subplots(2, 2, figsize=(7, 5)) np.random.seed(1) df1 = pd.DataFrame(np.random.rand(5, 2), pd.date_range('2011-01-01', periods=5, freq='D')) df2 = pd.DataFrame(np.random.rand(2, 2), pd.date_range('2011-01-01', periods=2, freq='M')) df1.plot(kind='line', ax=axes[0][0], legend=False) df2.plot(kind='area', ax=axes[0][0], legend=False) df1.plot(kind='area', ax=axes[1][0], legend=False) df2.plot(kind='line', ax=axes[1][0], legend=False) df2.plot(kind='line', ax=axes[0][1], legend=False) df1.plot(kind='area', ax=axes[0][1], legend=False) # ValueError: Argument dimensions are incompatible df2.plot(kind='area', ax=axes[1][1], legend=False) df1.plot(kind='line', ax=axes[1][1], legend=False) ``` ### Result using current master - line with low freq -> area with high freq results in `ValueError` (top-right axes) - area with low freq -> line with high freq results in all lines, not area (bottom-right axes) ![figure_ng](https://cloud.githubusercontent.com/assets/1696302/3560065/dd93e976-0958-11e4-83e1-acd1dbffa1df.png) ### Result after fix ![figure_ok](https://cloud.githubusercontent.com/assets/1696302/3560079/622f2eb6-0959-11e4-90ed-5e7eee5dea52.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/7733
2014-07-12T00:14:34Z
2014-07-24T12:59:45Z
2014-07-24T12:59:45Z
2014-07-25T20:44:05Z
BUG: allow get default value upon IndexError, GH #7725
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index d776848de40d0..116608e5f8817 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -152,6 +152,7 @@ There are no experimental changes in 0.15.0 Bug Fixes ~~~~~~~~~ +- Bug in ``get`` where an ``IndexError`` would not cause the default value to be returned (:issue:`7725`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 59a457229d512..8daad2e76fae0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1038,7 +1038,7 @@ def get(self, key, default=None): """ try: return self[key] - except (KeyError, ValueError): + except (KeyError, ValueError, IndexError): return default def __getitem__(self, item): diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 044d4054755ba..43ac8275aeb45 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -123,6 +123,23 @@ def test_get_numeric_data(self): # _get_numeric_data is includes _get_bool_data, so can't test for non-inclusion + def test_get_default(self): + + # GH 7725 + d0 = "a", "b", "c", "d" + d1 = np.arange(4, dtype='int64') + others = "e", 10 + + for data, index in ((d0, d1), (d1, d0)): + s = Series(data, index=index) + for i,d in zip(index, data): + self.assertEqual(s.get(i), d) + self.assertEqual(s.get(i, d), d) + self.assertEqual(s.get(i, "z"), d) + for other in others: + self.assertEqual(s.get(other, "z"), "z") + self.assertEqual(s.get(other, other), other) + def test_nonzero(self): # GH 4633
Fixes #7725 by adding IndexError to the tuple of caught exceptions.
https://api.github.com/repos/pandas-dev/pandas/pulls/7728
2014-07-11T01:17:23Z
2014-09-08T14:13:22Z
2014-09-08T14:13:22Z
2014-09-08T14:13:33Z
DOC: docstring for PeriodIndex
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index ed56bdc827ede..5948fbf8e5fa7 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -20,7 +20,7 @@ import pandas.lib as lib import pandas.tslib as tslib import pandas.algos as _algos -from pandas.compat import map, zip, u +from pandas.compat import zip, u #--------------- @@ -546,13 +546,13 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index): end : end value, period-like, optional If periods is none, generated index will extend to first conforming period on or just past end argument - year : int or array, default None - month : int or array, default None - quarter : int or array, default None - day : int or array, default None - hour : int or array, default None - minute : int or array, default None - second : int or array, default None + year : int, array, or Series, default None + month : int, array, or Series, default None + quarter : int, array, or Series, default None + day : int, array, or Series, default None + hour : int, array, or Series, default None + minute : int, array, or Series, default None + second : int, array, or Series, default None tz : object, default None Timezone for converting datetime64 data to Periods
null
https://api.github.com/repos/pandas-dev/pandas/pulls/7721
2014-07-10T15:47:30Z
2014-07-10T15:48:55Z
2014-07-10T15:48:55Z
2014-07-10T15:48:56Z
PERF: improve perf of index iteration (GH7683)
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index f305d088e996f..5a348025d0185 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -150,6 +150,7 @@ Enhancements Performance ~~~~~~~~~~~ +- Performance improvements in ``DatetimeIndex.__iter__`` to allow faster iteration (:issue:`7683`) diff --git a/pandas/core/base.py b/pandas/core/base.py index 81e13687441de..72fcfbff677ab 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -390,6 +390,9 @@ def _ops_compat(self, name, op_accessor): is_year_start = _field_accessor('is_year_start', "Logical indicating if first day of year (defined by frequency)") is_year_end = _field_accessor('is_year_end', "Logical indicating if last day of year (defined by frequency)") + def __iter__(self): + return (self._box_func(v) for v in self.asi8) + @property def _box_func(self): """ diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 70cd95341611f..dca2947f6a7a6 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1093,6 +1093,27 @@ def __array_finalize__(self, obj): self.name = getattr(obj, 'name', None) self._reset_identity() + def __iter__(self): + """ + Return an iterator over the boxed values + + Returns + ------- + Timestamps : ndarray + """ + + # convert in chunks of 10k for efficiency + data = self.asi8 + l = len(self) + chunksize = 10000 + chunks = int(l / chunksize) + 1 + for i in range(chunks): + start_i = i*chunksize + end_i = min((i+1)*chunksize,l) + converted = tslib.ints_to_pydatetime(data[start_i:end_i], tz=self.tz, offset=self.offset, box=True) + for v in converted: + yield v + def _wrap_union_result(self, other, result): name = self.name if self.name == other.name else None if self.tz != other.tz: @@ -1476,9 +1497,6 @@ def normalize(self): return DatetimeIndex(new_values, freq='infer', name=self.name, tz=self.tz) - def __iter__(self): - return iter(self.asobject) - def searchsorted(self, key, side='left'): if isinstance(key, np.ndarray): key = 
np.array(key, dtype=_NS_DTYPE, copy=False) diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 5948fbf8e5fa7..8c4bb2f5adc5e 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -738,10 +738,6 @@ def astype(self, dtype): return Index(self.values, dtype) raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype) - def __iter__(self): - for val in self.values: - yield Period(ordinal=val, freq=self.freq) - def searchsorted(self, key, side='left'): if isinstance(key, compat.string_types): key = Period(key, freq=self.freq).ordinal diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index 9c374716a84ee..531724cdb6837 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -1027,7 +1027,6 @@ def test_intersection(self): def test_timestamp_equality_different_timezones(self): utc_range = date_range('1/1/2000', periods=20, tz='UTC') - eastern_range = utc_range.tz_convert('US/Eastern') berlin_range = utc_range.tz_convert('Europe/Berlin') diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 2fd71521b24d5..c06d8a3ba9a05 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -74,41 +74,72 @@ try: except NameError: # py3 basestring = str -def ints_to_pydatetime(ndarray[int64_t] arr, tz=None): +cdef inline object create_timestamp_from_ts(int64_t value, pandas_datetimestruct dts, object tz, object offset): + cdef _Timestamp ts_base + ts_base = _Timestamp.__new__(Timestamp, dts.year, dts.month, + dts.day, dts.hour, dts.min, + dts.sec, dts.us, tz) + + ts_base.value = value + ts_base.offset = offset + ts_base.nanosecond = dts.ps / 1000 + + return ts_base + +cdef inline object create_datetime_from_ts(int64_t value, pandas_datetimestruct dts, object tz, object offset): + return datetime(dts.year, dts.month, dts.day, dts.hour, + dts.min, dts.sec, dts.us, tz) + +def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, offset=None, box=False): + # convert an i8 
repr to an ndarray of datetimes or Timestamp (if box == True) + cdef: Py_ssize_t i, n = len(arr) pandas_datetimestruct dts + object dt + int64_t value ndarray[object] result = np.empty(n, dtype=object) + object (*func_create)(int64_t, pandas_datetimestruct, object, object) + + if box and util.is_string_object(offset): + from pandas.tseries.frequencies import to_offset + offset = to_offset(offset) + + if box: + func_create = create_timestamp_from_ts + else: + func_create = create_datetime_from_ts if tz is not None: if _is_utc(tz): for i in range(n): - if arr[i] == iNaT: - result[i] = np.nan + value = arr[i] + if value == iNaT: + result[i] = NaT else: - pandas_datetime_to_datetimestruct(arr[i], PANDAS_FR_ns, &dts) - result[i] = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz) + pandas_datetime_to_datetimestruct(value, PANDAS_FR_ns, &dts) + result[i] = func_create(value, dts, tz, offset) elif _is_tzlocal(tz) or _is_fixed_offset(tz): for i in range(n): - if arr[i] == iNaT: - result[i] = np.nan + value = arr[i] + if value == iNaT: + result[i] = NaT else: - pandas_datetime_to_datetimestruct(arr[i], PANDAS_FR_ns, &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz) + pandas_datetime_to_datetimestruct(value, PANDAS_FR_ns, &dts) + dt = func_create(value, dts, tz, offset) result[i] = dt + tz.utcoffset(dt) else: trans = _get_transitions(tz) deltas = _get_deltas(tz) for i in range(n): - if arr[i] == iNaT: - result[i] = np.nan + value = arr[i] + if value == iNaT: + result[i] = NaT else: # Adjust datetime64 timestamp, recompute datetimestruct - pos = trans.searchsorted(arr[i], side='right') - 1 + pos = trans.searchsorted(value, side='right') - 1 if _treat_tz_as_pytz(tz): # find right representation of dst etc in pytz timezone new_tz = tz._tzinfos[tz._transition_info[pos]] @@ -116,19 +147,17 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None): # no zone-name change for dateutil tzs - dst etc represented 
in single object. new_tz = tz - pandas_datetime_to_datetimestruct(arr[i] + deltas[pos], - PANDAS_FR_ns, &dts) - result[i] = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, - new_tz) + pandas_datetime_to_datetimestruct(value + deltas[pos], PANDAS_FR_ns, &dts) + result[i] = func_create(value, dts, new_tz, offset) else: for i in range(n): - if arr[i] == iNaT: - result[i] = np.nan + + value = arr[i] + if value == iNaT: + result[i] = NaT else: - pandas_datetime_to_datetimestruct(arr[i], PANDAS_FR_ns, &dts) - result[i] = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us) + pandas_datetime_to_datetimestruct(value, PANDAS_FR_ns, &dts) + result[i] = func_create(value, dts, None, offset) return result @@ -183,6 +212,7 @@ class Timestamp(_Timestamp): def utcnow(cls): return cls.now('UTC') + def __new__(cls, object ts_input, object offset=None, tz=None, unit=None): cdef _TSObject ts cdef _Timestamp ts_base diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py index 2b63eeaf99550..bb55b88cf1f34 100644 --- a/vb_suite/timeseries.py +++ b/vb_suite/timeseries.py @@ -333,3 +333,28 @@ def date_range(start=None, end=None, periods=None, freq=None): timeseries_is_month_start = Benchmark('rng.is_month_start', setup, start_date=datetime(2014, 4, 1)) + +#---------------------------------------------------------------------- +# iterate over DatetimeIndex/PeriodIndex +setup = common_setup + """ +N = 1000000 +M = 10000 +idx1 = date_range(start='20140101', freq='T', periods=N) +idx2 = period_range(start='20140101', freq='T', periods=N) + +def iter_n(iterable, n=None): + i = 0 + for _ in iterable: + i += 1 + if n is not None and i > n: + break +""" + +timeseries_iter_datetimeindex = Benchmark('iter_n(idx1)', setup) + +timeseries_iter_periodindex = Benchmark('iter_n(idx2)', setup) + +timeseries_iter_datetimeindex_preexit = Benchmark('iter_n(idx1, M)', setup) + +timeseries_iter_periodindex_preexit = Benchmark('iter_n(idx2, M)', setup) +
closes #7683 `PeriodIndex` creation is still in python space, not much help ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- timeseries_iter_datetimeindex_preexit | 12.7254 | 3657.9890 | 0.0035 | timeseries_iter_datetimeindex | 679.8913 | 3726.6284 | 0.1824 | timeseries_iter_periodindex_preexit | 69.0370 | 62.8881 | 1.0978 | timeseries_iter_periodindex | 6941.9633 | 6024.2947 | 1.1523 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [142718e] : PERF: DatetimeIndex.__iter__ now uses ints_to_pydatetime with boxing Base [f9493ea] : Merge pull request #7713 from jorisvandenbossche/doc-fixes3 DOC: fix doc build warnings ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7720
2014-07-10T14:50:32Z
2014-07-16T00:29:51Z
2014-07-16T00:29:51Z
2014-07-16T00:30:13Z
SQL: suppress warning for BIGINT with sqlite and sqlalchemy<0.8.2 (GH7433)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 9a479afd86cad..23ca80d771df9 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -29,6 +29,37 @@ class DatabaseError(IOError): #------------------------------------------------------------------------------ # Helper functions +_SQLALCHEMY_INSTALLED = None + +def _is_sqlalchemy_engine(con): + global _SQLALCHEMY_INSTALLED + if _SQLALCHEMY_INSTALLED is None: + try: + import sqlalchemy + _SQLALCHEMY_INSTALLED = True + + from distutils.version import LooseVersion + ver = LooseVersion(sqlalchemy.__version__) + # For sqlalchemy versions < 0.8.2, the BIGINT type is recognized + # for a sqlite engine, which results in a warning when trying to + # read/write a DataFrame with int64 values. (GH7433) + if ver < '0.8.2': + from sqlalchemy import BigInteger + from sqlalchemy.ext.compiler import compiles + + @compiles(BigInteger, 'sqlite') + def compile_big_int_sqlite(type_, compiler, **kw): + return 'INTEGER' + except ImportError: + _SQLALCHEMY_INSTALLED = False + + if _SQLALCHEMY_INSTALLED: + import sqlalchemy + return isinstance(con, sqlalchemy.engine.Engine) + else: + return False + + def _convert_params(sql, params): """convert sql and params args to DBAPI2.0 compliant format""" args = [sql] @@ -76,17 +107,6 @@ def _parse_date_columns(data_frame, parse_dates): return data_frame -def _is_sqlalchemy_engine(con): - try: - import sqlalchemy - if isinstance(con, sqlalchemy.engine.Engine): - return True - else: - return False - except ImportError: - return False - - def execute(sql, con, cur=None, params=None): """ Execute the given SQL query using the provided connection object. @@ -271,8 +291,10 @@ def read_sql_table(table_name, con, index_col=None, coerce_float=True, read_sql_query : Read SQL query into a DataFrame. 
read_sql - """ + if not _is_sqlalchemy_engine(con): + raise NotImplementedError("read_sql_table only supported for " + "SQLAlchemy engines.") import sqlalchemy from sqlalchemy.schema import MetaData meta = MetaData(con) diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 122b80c3f0076..eadcb2c9f1fdb 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -1079,6 +1079,16 @@ def test_default_date_load(self): self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") + def test_bigint_warning(self): + # test no warning for BIGINT (to support int64) is raised (GH7433) + df = DataFrame({'a':[1,2]}, dtype='int64') + df.to_sql('test_bigintwarning', self.conn, index=False) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + sql.read_sql_table('test_bigintwarning', self.conn) + self.assertEqual(len(w), 0, "Warning triggered for other table") + class TestMySQLAlchemy(_TestSQLAlchemy): """
From discussion here: https://github.com/pydata/pandas/pull/7634#issuecomment-48111148 Due to switching from Integer to BigInteger (to support int64 on some database systems), reading a table from sqlite with integers leads to a warning when you have an slqalchemy version of below 0.8.2. I know it is very very late and goes against all reservations of putting in new stuff just before a release, but after some more consideration, I think we should include this (or at least something that fixes it, and I think this does). @jreback, you said not to worry about it (it is just a warning), but the sqlalchemy release that fixes it is only just a year old and this is something most users will try as the first thing I think when using the sqlalchemy functions (writing/reading simple dataframe with some numbers with sqlite), so should not get a warning they don't understand. I tested it locally with sqlalchemy 0.7.8 (on Windows), and on travis it is tested with 0.7.1 (the py2.6 build) and there also the warnings disappeared. What do you think?
https://api.github.com/repos/pandas-dev/pandas/pulls/7719
2014-07-10T14:24:58Z
2014-07-10T22:34:56Z
2014-07-10T22:34:56Z
2014-07-10T22:35:31Z
CLN: Simplify LinePlot flow
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index d3ea809b79b76..6124da58995d8 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -755,9 +755,9 @@ class MPLPlot(object): _default_rot = 0 _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog', - 'mark_right'] + 'mark_right', 'stacked'] _attr_defaults = {'logy': False, 'logx': False, 'loglog': False, - 'mark_right': True} + 'mark_right': True, 'stacked': False} def __init__(self, data, kind=None, by=None, subplots=False, sharex=True, sharey=False, use_index=True, @@ -1080,7 +1080,6 @@ def _make_legend(self): for ax in self.axes: ax.legend(loc='best') - def _get_ax_legend(self, ax): leg = ax.get_legend() other_ax = (getattr(ax, 'right_ax', None) or @@ -1139,12 +1138,22 @@ def _get_plot_function(self): Returns the matplotlib plotting function (plot or errorbar) based on the presence of errorbar keywords. ''' - - if all(e is None for e in self.errors.values()): - plotf = self.plt.Axes.plot - else: - plotf = self.plt.Axes.errorbar - + errorbar = any(e is not None for e in self.errors.values()) + def plotf(ax, x, y, style=None, **kwds): + mask = com.isnull(y) + if mask.any(): + y = np.ma.array(y) + y = np.ma.masked_where(mask, y) + + if errorbar: + return self.plt.Axes.errorbar(ax, x, y, **kwds) + else: + # prevent style kwarg from going to errorbar, where it is unsupported + if style is not None: + args = (ax, x, y, style) + else: + args = (ax, x, y) + return self.plt.Axes.plot(*args, **kwds) return plotf def _get_index_name(self): @@ -1472,11 +1481,9 @@ def _post_plot_logic(self): class LinePlot(MPLPlot): def __init__(self, data, **kwargs): - self.stacked = kwargs.pop('stacked', False) - if self.stacked: - data = data.fillna(value=0) - MPLPlot.__init__(self, data, **kwargs) + if self.stacked: + self.data = self.data.fillna(value=0) self.x_compat = plot_params['x_compat'] if 'x_compat' in self.kwds: self.x_compat = bool(self.kwds.pop('x_compat')) @@ -1533,56 +1540,39 @@ 
def _is_ts_plot(self): return not self.x_compat and self.use_index and self._use_dynamic_x() def _make_plot(self): - self._pos_prior = np.zeros(len(self.data)) - self._neg_prior = np.zeros(len(self.data)) + self._initialize_prior(len(self.data)) if self._is_ts_plot(): data = self._maybe_convert_index(self.data) - self._make_ts_plot(data) + x = data.index # dummy, not used + plotf = self._get_ts_plot_function() + it = self._iter_data(data=data, keep_index=True) else: x = self._get_xticks(convert_period=True) - plotf = self._get_plot_function() - colors = self._get_colors() - - for i, (label, y) in enumerate(self._iter_data()): - ax = self._get_ax(i) - style = self._get_style(i, label) - kwds = self.kwds.copy() - self._maybe_add_color(colors, kwds, style, i) + it = self._iter_data() - errors = self._get_errorbars(label=label, index=i) - kwds = dict(kwds, **errors) - - label = com.pprint_thing(label) # .encode('utf-8') - kwds['label'] = label - - y_values = self._get_stacked_values(y, label) - - if not self.stacked: - mask = com.isnull(y_values) - if mask.any(): - y_values = np.ma.array(y_values) - y_values = np.ma.masked_where(mask, y_values) + colors = self._get_colors() + for i, (label, y) in enumerate(it): + ax = self._get_ax(i) + style = self._get_style(i, label) + kwds = self.kwds.copy() + self._maybe_add_color(colors, kwds, style, i) - # prevent style kwarg from going to errorbar, where it is unsupported - if style is not None and plotf.__name__ != 'errorbar': - args = (ax, x, y_values, style) - else: - args = (ax, x, y_values) + errors = self._get_errorbars(label=label, index=i) + kwds = dict(kwds, **errors) - newlines = plotf(*args, **kwds) - self._add_legend_handle(newlines[0], label, index=i) + label = com.pprint_thing(label) # .encode('utf-8') + kwds['label'] = label + y_values = self._get_stacked_values(y, label) - if self.stacked and not self.subplots: - if (y >= 0).all(): - self._pos_prior += y - elif (y <= 0).all(): - self._neg_prior += y + newlines = 
plotf(ax, x, y_values, style=style, **kwds) + self._update_prior(y) + self._add_legend_handle(newlines[0], label, index=i) - lines = _get_all_lines(ax) - left, right = _get_xlim(lines) - ax.set_xlim(left, right) + lines = _get_all_lines(ax) + left, right = _get_xlim(lines) + ax.set_xlim(left, right) def _get_stacked_values(self, y, label): if self.stacked: @@ -1599,46 +1589,26 @@ def _get_stacked_values(self, y, label): def _get_ts_plot_function(self): from pandas.tseries.plotting import tsplot plotf = self._get_plot_function() - - def _plot(data, ax, label, style, **kwds): - # errorbar function does not support style argument - if plotf.__name__ == 'errorbar': - lines = tsplot(data, plotf, ax=ax, label=label, - **kwds) - return lines - else: - lines = tsplot(data, plotf, ax=ax, label=label, - style=style, **kwds) - return lines + def _plot(ax, x, data, style=None, **kwds): + # accept x to be consistent with normal plot func, + # x is not passed to tsplot as it uses data.index as x coordinate + lines = tsplot(data, plotf, ax=ax, style=style, **kwds) + return lines return _plot - def _make_ts_plot(self, data, **kwargs): - colors = self._get_colors() - plotf = self._get_ts_plot_function() - - it = self._iter_data(data=data, keep_index=True) - for i, (label, y) in enumerate(it): - ax = self._get_ax(i) - style = self._get_style(i, label) - kwds = self.kwds.copy() - - self._maybe_add_color(colors, kwds, style, i) - - errors = self._get_errorbars(label=label, index=i, xerr=False) - kwds = dict(kwds, **errors) - - label = com.pprint_thing(label) - - y_values = self._get_stacked_values(y, label) - - newlines = plotf(y_values, ax, label, style, **kwds) - self._add_legend_handle(newlines[0], label, index=i) + def _initialize_prior(self, n): + self._pos_prior = np.zeros(n) + self._neg_prior = np.zeros(n) - if self.stacked and not self.subplots: - if (y >= 0).all(): - self._pos_prior += y - elif (y <= 0).all(): - self._neg_prior += y + def _update_prior(self, y): + if 
self.stacked and not self.subplots: + # tsplot resample may changedata length + if len(self._pos_prior) != len(y): + self._initialize_prior(len(y)) + if (y >= 0).all(): + self._pos_prior += y + elif (y <= 0).all(): + self._neg_prior += y def _maybe_convert_index(self, data): # tsplot converts automatically, but don't want to convert index @@ -1707,13 +1677,14 @@ def _get_plot_function(self): if self.logy or self.loglog: raise ValueError("Log-y scales are not supported in area plot") else: - f = LinePlot._get_plot_function(self) - - def plotf(*args, **kwds): - lines = f(*args, **kwds) + f = MPLPlot._get_plot_function(self) + def plotf(ax, x, y, style=None, **kwds): + lines = f(ax, x, y, style=style, **kwds) + # get data from the line # insert fill_between starting point - y = args[2] + xdata, y_values = lines[0].get_data(orig=False) + if (y >= 0).all(): start = self._pos_prior elif (y <= 0).all(): @@ -1721,16 +1692,10 @@ def plotf(*args, **kwds): else: start = np.zeros(len(y)) - # get x data from the line - # to retrieve x coodinates of tsplot - xdata = lines[0].get_data()[0] - # remove style - args = (args[0], xdata, start, y) - if not 'color' in kwds: kwds['color'] = lines[0].get_color() - self.plt.Axes.fill_between(*args, **kwds) + self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds) return lines return plotf @@ -1746,15 +1711,6 @@ def _add_legend_handle(self, handle, label, index=None): def _post_plot_logic(self): LinePlot._post_plot_logic(self) - if self._is_ts_plot(): - pass - else: - if self.xlim is None: - for ax in self.axes: - lines = _get_all_lines(ax) - left, right = _get_xlim(lines) - ax.set_xlim(left, right) - if self.ylim is None: if (self.data >= 0).all().all(): for ax in self.axes: @@ -1769,12 +1725,8 @@ class BarPlot(MPLPlot): _default_rot = {'bar': 90, 'barh': 0} def __init__(self, data, **kwargs): - self.stacked = kwargs.pop('stacked', False) - self.bar_width = kwargs.pop('width', 0.5) - pos = kwargs.pop('position', 0.5) - 
kwargs.setdefault('align', 'center') self.tick_pos = np.arange(len(data)) diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py index 6031482fd9927..33a14403b0f08 100644 --- a/pandas/tseries/plotting.py +++ b/pandas/tseries/plotting.py @@ -18,8 +18,6 @@ from pandas.tseries.converter import (PeriodConverter, TimeSeries_DateLocator, TimeSeries_DateFormatter) -from pandas.tools.plotting import _get_all_lines, _get_xlim - #---------------------------------------------------------------------- # Plotting functions and monkey patches @@ -59,25 +57,15 @@ def tsplot(series, plotf, **kwargs): # Set ax with freq info _decorate_axes(ax, freq, kwargs) - # mask missing values - args = _maybe_mask(series) - # how to make sure ax.clear() flows through? if not hasattr(ax, '_plot_data'): ax._plot_data = [] ax._plot_data.append((series, kwargs)) - # styles - style = kwargs.pop('style', None) - if style is not None: - args.append(style) - - lines = plotf(ax, *args, **kwargs) + lines = plotf(ax, series.index, series.values, **kwargs) # set date formatter, locators and rescale limits format_dateaxis(ax, ax.freq) - left, right = _get_xlim(_get_all_lines(ax)) - ax.set_xlim(left, right) # x and y coord info ax.format_coord = lambda t, y: ("t = {0} " @@ -165,8 +153,7 @@ def _replot_ax(ax, freq, plotf, kwargs): idx = series.index.asfreq(freq, how='S') series.index = idx ax._plot_data.append(series) - args = _maybe_mask(series) - lines.append(plotf(ax, *args, **kwds)[0]) + lines.append(plotf(ax, series.index, series.values, **kwds)[0]) labels.append(com.pprint_thing(series.name)) return lines, labels @@ -184,17 +171,6 @@ def _decorate_axes(ax, freq, kwargs): ax.date_axis_info = None -def _maybe_mask(series): - mask = isnull(series) - if mask.any(): - masked_array = np.ma.array(series.values) - masked_array = np.ma.masked_where(mask, masked_array) - args = [series.index, masked_array] - else: - args = [series.index, series.values] - return args - - def _get_freq(ax, series): # 
get frequency from data freq = getattr(series.index, 'freq', None)
Related to #7670. Made `LinePlot` to use single plotting flow in `x_compat` and `tsplot`.
https://api.github.com/repos/pandas-dev/pandas/pulls/7717
2014-07-10T13:02:54Z
2014-07-21T13:14:05Z
2014-07-21T13:14:05Z
2014-07-23T11:08:48Z
DOC: clean up 0.14.1 whatsnew file
diff --git a/doc/source/release.rst b/doc/source/release.rst index d6fbc3a9d8896..fb06dc4d61814 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -50,9 +50,20 @@ pandas 0.14.1 **Release date:** (July 11, 2014) -This is a minor release from 0.14.0 and includes a number of API changes, several new features, enhancements, and +This is a minor release from 0.14.0 and includes a small number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. +Highlights include: + +- New methods :meth:`~pandas.DataFrame.select_dtypes` to select columns + based on the dtype and :meth:`~pandas.Series.sem` to calculate the + standard error of the mean. +- Support for dateutil timezones (see :ref:`docs <timeseries.timezone>`). +- Support for ignoring full line comments in the :func:`~pandas.read_csv` + text parser. +- New documentation section on :ref:`Options and Settings <options>`. +- Lots of bug fixes. + See the :ref:`v0.14.1 Whatsnew <whatsnew_0141>` overview or the issue tracker on GitHub for an extensive list of all API changes, enhancements and bugs that have been fixed in 0.14.1. diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 0e6c98a1a8d23..2b5f8b2dfbb38 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -7,16 +7,21 @@ This is a minor release from 0.14.0 and includes a small number of API changes, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all users upgrade to this version. -- New Documentation section on :ref:`Options and Settings <options>` +- Highlights include: -- :ref:`Enhancements <whatsnew_0141.enhancements>` + - New methods :meth:`~pandas.DataFrame.select_dtypes` to select columns + based on the dtype and :meth:`~pandas.Series.sem` to calculate the + standard error of the mean. + - Support for dateutil timezones (see :ref:`docs <timeseries.timezone>`). 
+ - Support for ignoring full line comments in the :func:`~pandas.read_csv` + text parser. + - New documentation section on :ref:`Options and Settings <options>`. + - Lots of bug fixes. +- :ref:`Enhancements <whatsnew_0141.enhancements>` - :ref:`API Changes <whatsnew_0141.api>` - - :ref:`Performance Improvements <whatsnew_0141.performance>` - - :ref:`Experimental Changes <whatsnew_0141.experimental>` - - :ref:`Bug Fixes <whatsnew_0141.bug_fixes>` .. _whatsnew_0141.api: @@ -24,22 +29,6 @@ users upgrade to this version. API changes ~~~~~~~~~~~ -- All ``offsets`` suppports ``normalize`` keyword to specify whether ``offsets.apply``, ``rollforward`` and ``rollback`` resets time (hour, minute, etc) or not (default ``False``, preserves time) (:issue:`7156`) - - - .. ipython:: python - - import pandas.tseries.offsets as offsets - - day = offsets.Day() - day.apply(Timestamp('2014-01-01 09:00')) - - day = offsets.Day(normalize=True) - day.apply(Timestamp('2014-01-01 09:00')) - -- Improved inference of datetime/timedelta with mixed null objects. Regression from 0.13.1 in interpretation of an object Index - with all null elements (:issue:`7431`) - - Openpyxl now raises a ValueError on construction of the openpyxl writer instead of warning on pandas import (:issue:`7284`). @@ -47,68 +36,85 @@ API changes containing ``NaN`` values - now also has ``dtype=object`` instead of ``float`` (:issue:`7242`) -- ``StringMethods`` now work on empty Series (:issue:`7242`) - ``Period`` objects no longer raise a ``TypeError`` when compared using ``==`` with another object that *isn't* a ``Period``. Instead when comparing a ``Period`` with another object using ``==`` if the other object isn't a ``Period`` ``False`` is returned. 
(:issue:`7376`) -- Bug in ``.loc`` performing fallback integer indexing with ``object`` dtype indices (:issue:`7496`) -- Add back ``#N/A N/A`` as a default NA value in text parsing, (regresion from 0.12) (:issue:`5521`) -- Raise a ``TypeError`` on inplace-setting with a ``.where`` and a non ``np.nan`` value as this is inconsistent - with a set-item expression like ``df[mask] = None`` (:issue:`7656`) - -.. _whatsnew_0141.prior_deprecations: - -Prior Version Deprecations/Changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +- Previously, the behaviour on resetting the time or not in + ``offsets.apply``, ``rollforward`` and ``rollback`` operations differed + between offsets. With the support of the ``normalize`` keyword for all offsets(see + below) with a default value of False (preserve time), the behaviour changed for certain + offsets (BusinessMonthBegin, MonthEnd, BusinessMonthEnd, CustomBusinessMonthEnd, + BusinessYearBegin, LastWeekOfMonth, FY5253Quarter, LastWeekOfMonth, Easter): -There are no prior version deprecations that are taking effect as of 0.14.1. + .. code-block:: python -.. _whatsnew_0141.deprecations: + In [6]: from pandas.tseries import offsets -Deprecations -~~~~~~~~~~~~ + In [7]: d = pd.Timestamp('2014-01-01 09:00') -There are no deprecations that are taking effect as of 0.14.1. + # old behaviour < 0.14.1 + In [8]: d + offsets.MonthEnd() + Out[8]: Timestamp('2014-01-31 00:00:00') -.. _whatsnew_0141.enhancements: - -Enhancements -~~~~~~~~~~~~ - - - -- Add ``dropna`` argument to ``value_counts`` and ``nunique`` (:issue:`5569`). -- Add ``NotImplementedError`` for simultaneous use of ``chunksize`` and ``nrows`` - for read_csv() (:issue:`6774`). + Starting from 0.14.1 all offsets preserve time by default. The old + behaviour can be obtained with ``normalize=True`` -- ``PeriodIndex`` is represented as the same format as ``DatetimeIndex`` (:issue:`7601`) + .. 
ipython:: python + :suppress: + import pandas.tseries.offsets as offsets + d = pd.Timestamp('2014-01-01 09:00') + .. ipython:: python + # new behaviour + d + offsets.MonthEnd() + d + offsets.MonthEnd(normalize=True) + Note that for the other offsets the default behaviour did not change. +- Add back ``#N/A N/A`` as a default NA value in text parsing, (regresion from 0.12) (:issue:`5521`) +- Raise a ``TypeError`` on inplace-setting with a ``.where`` and a non ``np.nan`` value as this is inconsistent + with a set-item expression like ``df[mask] = None`` (:issue:`7656`) +.. _whatsnew_0141.enhancements: +Enhancements +~~~~~~~~~~~~ +- Add ``dropna`` argument to ``value_counts`` and ``nunique`` (:issue:`5569`). - Add :meth:`~pandas.DataFrame.select_dtypes` method to allow selection of columns based on dtype (:issue:`7316`). See :ref:`the docs <basics.selectdtypes>`. +- All ``offsets`` suppports the ``normalize`` keyword to specify whether + ``offsets.apply``, ``rollforward`` and ``rollback`` resets the time (hour, + minute, etc) or not (default ``False``, preserves time) (:issue:`7156`): + .. ipython:: python + import pandas.tseries.offsets as offsets + day = offsets.Day() + day.apply(Timestamp('2014-01-01 09:00')) + + day = offsets.Day(normalize=True) + day.apply(Timestamp('2014-01-01 09:00')) + +- ``PeriodIndex`` is represented as the same format as ``DatetimeIndex`` (:issue:`7601`) +- ``StringMethods`` now work on empty Series (:issue:`7242`) - The file parsers ``read_csv`` and ``read_table`` now ignore line comments provided by the parameter `comment`, which accepts only a single character for the C reader. In particular, they allow for comments before file data begins (:issue:`2685`) +- Add ``NotImplementedError`` for simultaneous use of ``chunksize`` and ``nrows`` + for read_csv() (:issue:`6774`). - Tests for basic reading of public S3 buckets now exist (:issue:`7281`). 
- ``read_html`` now sports an ``encoding`` argument that is passed to the underlying parser library. You can use this to read non-ascii encoded web pages (:issue:`7323`). - ``read_excel`` now supports reading from URLs in the same way that ``read_csv`` does. (:issue:`6809`) - - - Support for dateutil timezones, which can now be used in the same way as pytz timezones across pandas. (:issue:`4688`) @@ -125,16 +131,13 @@ Enhancements - Add ``nlargest`` and ``nsmallest`` to the ``Series`` ``groupby`` whitelist, which means you can now use these methods on a ``SeriesGroupBy`` object (:issue:`7053`). - - - - All offsets ``apply``, ``rollforward`` and ``rollback`` can now handle ``np.datetime64``, previously results in ``ApplyTypeError`` (:issue:`7452`) - - ``Period`` and ``PeriodIndex`` can contain ``NaT`` in its values (:issue:`7485`) - Support pickling ``Series``, ``DataFrame`` and ``Panel`` objects with non-unique labels along *item* axis (``index``, ``columns`` and ``items`` respectively) (:issue:`7370`). - +- Improved inference of datetime/timedelta with mixed null objects. Regression from 0.13.1 in interpretation of an object Index + with all null elements (:issue:`7431`) .. _whatsnew_0141.performance: @@ -147,25 +150,20 @@ Performance - Improvements in `MultiIndex.from_product` for large iterables (:issue:`7627`) - .. _whatsnew_0141.experimental: - - - - Experimental ~~~~~~~~~~~~ - ``pandas.io.data.Options`` has a new method, ``get_all_data`` method, and now consistently returns a multi-indexed ``DataFrame``, see :ref:`the docs <remote_data.yahoo_options>`. (:issue:`5602`) - - ``io.gbq.read_gbq`` and ``io.gbq.to_gbq`` were refactored to remove the dependency on the Google ``bq.py`` command line client. This submodule now uses ``httplib2`` and the Google ``apiclient`` and ``oauth2client`` API client libraries which should be more stable and, therefore, reliable than ``bq.py``. See :ref:`the docs <io.bigquery>`. (:issue:`6937`). + .. 
_whatsnew_0141.bug_fixes: Bug Fixes @@ -185,10 +183,7 @@ Bug Fixes - Bug in plotting subplots with ``DataFrame.plot``, ``hist`` clears passed ``ax`` even if the number of subplots is one (:issue:`7391`). - Bug in plotting subplots with ``DataFrame.boxplot`` with ``by`` kw raises ``ValueError`` if the number of subplots exceeds 1 (:issue:`7391`). - Bug in subplots displays ``ticklabels`` and ``labels`` in different rule (:issue:`5897`) - - Bug in ``Panel.apply`` with a multi-index as an axis (:issue:`7469`) - - - Bug in ``DatetimeIndex.insert`` doesn't preserve ``name`` and ``tz`` (:issue:`7299`) - Bug in ``DatetimeIndex.asobject`` doesn't preserve ``name`` (:issue:`7299`) - Bug in multi-index slicing with datetimelike ranges (strings and Timestamps), (:issue:`7429`) @@ -246,49 +241,31 @@ Bug Fixes - Bug in ``StataReader.data`` where reading a 0-observation dta failed (:issue:`7369`) - Bug in when reading Stata 13 (117) files containing fixed width strings (:issue:`7360`) - Bug in when writing Stata files where the encoding was ignored (:issue:`7286`) - - - Bug in ``DatetimeIndex`` comparison doesn't handle ``NaT`` properly (:issue:`7529`) - - - Bug in passing input with ``tzinfo`` to some offsets ``apply``, ``rollforward`` or ``rollback`` resets ``tzinfo`` or raises ``ValueError`` (:issue:`7465`) - Bug in ``DatetimeIndex.to_period``, ``PeriodIndex.asobject``, ``PeriodIndex.to_timestamp`` doesn't preserve ``name`` (:issue:`7485`) - Bug in ``DatetimeIndex.to_period`` and ``PeriodIndex.to_timestanp`` handle ``NaT`` incorrectly (:issue:`7228`) - - Bug in ``offsets.apply``, ``rollforward`` and ``rollback`` may return normal ``datetime`` (:issue:`7502`) - - - Bug in ``resample`` raises ``ValueError`` when target contains ``NaT`` (:issue:`7227`) - - Bug in ``Timestamp.tz_localize`` resets ``nanosecond`` info (:issue:`7534`) - Bug in ``DatetimeIndex.asobject`` raises ``ValueError`` when it contains ``NaT`` (:issue:`7539`) - Bug in ``Timestamp.__new__`` doesn't preserve 
nanosecond properly (:issue:`7610`) - - Bug in ``Index.astype(float)`` where it would return an ``object`` dtype ``Index`` (:issue:`7464`). - Bug in ``DataFrame.reset_index`` loses ``tz`` (:issue:`3950`) - Bug in ``DatetimeIndex.freqstr`` raises ``AttributeError`` when ``freq`` is ``None`` (:issue:`7606`) - Bug in ``GroupBy.size`` created by ``TimeGrouper`` raises ``AttributeError`` (:issue:`7453`) - - Bug in single column bar plot is misaligned (:issue:`7498`). - - - - Bug in area plot with tz-aware time series raises ``ValueError`` (:issue:`7471`) - - Bug in non-monotonic ``Index.union`` may preserve ``name`` incorrectly (:issue:`7458`) - Bug in ``DatetimeIndex.intersection`` doesn't preserve timezone (:issue:`4690`) - - Bug in ``rolling_var`` where a window larger than the array would raise an error(:issue:`7297`) - - Bug with last plotted timeseries dictating ``xlim`` (:issue:`2960`) - Bug with ``secondary_y`` axis not being considered for timeseries ``xlim`` (:issue:`3490`) - - Bug in ``Float64Index`` assignment with a non scalar indexer (:issue:`7586`) - Bug in ``pandas.core.strings.str_contains`` does not properly match in a case insensitive fashion when ``regex=False`` and ``case=False`` (:issue:`7505`) - - Bug in ``expanding_cov``, ``expanding_corr``, ``rolling_cov``, and ``rolling_corr`` for two arguments with mismatched index (:issue:`7512`) - - Bug in ``to_sql`` taking the boolean column as text column (:issue:`7678`) - Bug in grouped `hist` doesn't handle `rot` kw and `sharex` kw properly (:issue:`7234`) +- Bug in ``.loc`` performing fallback integer indexing with ``object`` dtype indices (:issue:`7496`) - Bug (regression) in ``PeriodIndex`` constructor when passed ``Series`` objects (:issue:`7701`).
- removed the sections with no entries - removed whitespace - moved some entries from API-changes to Enhancements/Big fixes when it was not really a backwards incompatible change or if it was not clear what the relevant change was for users (and for the offsets normalize issue I rewrote it) Highlights should still be written to include here and in the release.rst
https://api.github.com/repos/pandas-dev/pandas/pulls/7714
2014-07-10T07:57:40Z
2014-07-10T23:08:37Z
2014-07-10T23:08:37Z
2014-07-11T20:38:28Z
DOC: fix doc build warnings
diff --git a/doc/source/io.rst b/doc/source/io.rst index 109b7a0a38fc5..cfa97ca0f3fef 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1996,7 +1996,7 @@ Excel writer engines By default, ``pandas`` uses the `XlsxWriter`_ for ``.xlsx`` and `openpyxl`_ for ``.xlsm`` files and `xlwt`_ for ``.xls`` files. If you have multiple engines installed, you can set the default engine through :ref:`setting the -config options <basics.working_with_options>` ``io.excel.xlsx.writer`` and +config options <options>` ``io.excel.xlsx.writer`` and ``io.excel.xls.writer``. pandas will fall back on `openpyxl`_ for ``.xlsx`` files if `Xlsxwriter`_ is not available. diff --git a/pandas/core/config.py b/pandas/core/config.py index a16b32d5dd185..3e8d76500d128 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -640,7 +640,7 @@ def _build_option_description(k): _get_option(k, True)) if d: - s += u('\n\t(Deprecated') + s += u('\n (Deprecated') s += (u(', use `%s` instead.') % d.rkey if d.rkey else '') s += u(')') diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 413f3daa52a52..b97cb11906e2f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1898,7 +1898,7 @@ def select_dtypes(self, include=None, exclude=None): * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy - <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ + <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ Examples --------
This should fix all doc build errors/warnings (apart from the known ones).
https://api.github.com/repos/pandas-dev/pandas/pulls/7713
2014-07-09T21:01:45Z
2014-07-10T07:17:53Z
2014-07-10T07:17:53Z
2014-07-10T07:17:55Z
BUG: PeriodIndex constructor doesn't work with Series objects
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 8fde5df6fd75a..0e6c98a1a8d23 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -291,4 +291,4 @@ Bug Fixes - Bug in ``to_sql`` taking the boolean column as text column (:issue:`7678`) - Bug in grouped `hist` doesn't handle `rot` kw and `sharex` kw properly (:issue:`7234`) - +- Bug (regression) in ``PeriodIndex`` constructor when passed ``Series`` objects (:issue:`7701`). diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index cceac61f392a8..ed56bdc827ede 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -14,7 +14,7 @@ import pandas.core.common as com from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box, - _values_from_object) + _values_from_object, ABCSeries) from pandas import compat from pandas.lib import Timestamp import pandas.lib as lib @@ -1261,13 +1261,13 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None, def _make_field_arrays(*fields): length = None for x in fields: - if isinstance(x, (list, np.ndarray)): + if isinstance(x, (list, np.ndarray, ABCSeries)): if length is not None and len(x) != length: raise ValueError('Mismatched Period array lengths') elif length is None: length = len(x) - arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list)) + arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) else np.repeat(x, length) for x in fields] return arrays diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 42edb799b4c89..53375b4d07796 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -1281,6 +1281,15 @@ def test_constructor_nat(self): self.assertRaises( ValueError, period_range, start='2011-01-01', end='NaT', freq='M') + def test_constructor_year_and_quarter(self): + year = pd.Series([2001, 2002, 2003]) + quarter = year - 2000 + idx = PeriodIndex(year=year, quarter=quarter) + strs = ['%dQ%d' % t for 
t in zip(quarter, year)] + lops = list(map(Period, strs)) + p = PeriodIndex(lops) + tm.assert_index_equal(p, idx) + def test_is_(self): create_index = lambda: PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
closes #7701
https://api.github.com/repos/pandas-dev/pandas/pulls/7712
2014-07-09T20:48:17Z
2014-07-10T15:15:02Z
2014-07-10T15:15:02Z
2014-07-10T15:44:34Z
API: Add PeriodIndex.resolution
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index eb58f46f0f3fe..e11a3730cd95a 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -117,6 +117,8 @@ Enhancements - Added support for bool, uint8, uint16 and uint32 datatypes in ``to_stata`` (:issue:`7097`, :issue:`7365`) +- ``PeriodIndex`` supports ``resolution`` as the same as ``DatetimeIndex`` (:issue:`7708`) + diff --git a/pandas/core/base.py b/pandas/core/base.py index 72fcfbff677ab..4035627b98458 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -498,3 +498,17 @@ def __unicode__(self): summary += self._format_footer() return summary + @cache_readonly + def _resolution(self): + from pandas.tseries.frequencies import Resolution + return Resolution.get_reso_from_freq(self.freqstr) + + @cache_readonly + def resolution(self): + """ + Returns day, hour, minute, second, millisecond or microsecond + """ + from pandas.tseries.frequencies import get_reso_string + return get_reso_string(self._resolution) + + diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 832671521c815..761d79a288df3 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -605,6 +605,14 @@ def test_representation(self): result = getattr(idx, func)() self.assertEqual(result, expected) + def test_resolution(self): + for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], + ['day', 'day', 'day', 'day', + 'hour', 'minute', 'second', 'millisecond', 'microsecond']): + for tz in [None, 'Asia/Tokyo', 'US/Eastern']: + idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz) + self.assertEqual(idx.resolution, expected) + class TestPeriodIndexOps(Ops): _allowed = '_allow_period_index_ops' @@ -729,6 +737,14 @@ def test_representation(self): result = getattr(idx, func)() self.assertEqual(result, expected) + def test_resolution(self): + for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], + ['day', 'day', 'day', 'day', + 'hour', 
'minute', 'second', 'millisecond', 'microsecond']): + + idx = pd.period_range(start='2013-04-01', periods=30, freq=freq) + self.assertEqual(idx.resolution, expected) + if __name__ == '__main__': import nose diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index fe61e5f0acd9b..4beccaa758006 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -45,7 +45,9 @@ class Resolution(object): RESO_HR: 'hour', RESO_DAY: 'day'} - _reso_period_map = { + _str_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_str_map)]) + + _reso_freq_map = { 'year': 'A', 'quarter': 'Q', 'month': 'M', @@ -57,13 +59,28 @@ class Resolution(object): 'microsecond': 'U', 'nanosecond': 'N'} + _freq_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_freq_map)]) + @classmethod def get_str(cls, reso): return cls._reso_str_map.get(reso, 'day') + @classmethod + def get_reso(cls, resostr): + return cls._str_reso_map.get(resostr, cls.RESO_DAY) + @classmethod def get_freq(cls, resostr): - return cls._reso_period_map[resostr] + return cls._reso_freq_map[resostr] + + @classmethod + def get_str_from_freq(cls, freq): + return cls._freq_reso_map.get(freq, 'day') + + @classmethod + def get_reso_from_freq(cls, freq): + return cls.get_reso(cls.get_str_from_freq(freq)) + def get_reso_string(reso): return Resolution.get_str(reso) @@ -593,7 +610,7 @@ def _period_alias_dictionary(): def _infer_period_group(freqstr): - return _period_group(Resolution._reso_period_map[freqstr]) + return _period_group(Resolution._reso_freq_map[freqstr]) def _period_group(freqstr): diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index dca2947f6a7a6..9423037844e74 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1536,14 +1536,6 @@ def is_normalized(self): """ return tslib.dates_normalized(self.asi8, self.tz) - @cache_readonly - def resolution(self): - """ - Returns day, hour, minute, second, or microsecond - """ - reso = self._resolution 
- return get_reso_string(reso) - @cache_readonly def _resolution(self): return tslib.resolution(self.asi8, self.tz)
Add `resolution` to `PeriodIndex` as `DatetimeIndex` has. NOTE: Going to use this to calculate common freq in #7670.
https://api.github.com/repos/pandas-dev/pandas/pulls/7708
2014-07-09T15:14:46Z
2014-07-21T11:49:58Z
2014-07-21T11:49:58Z
2014-07-23T11:09:22Z
BUG: offset normalize option may not work in addition/subtraction
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index e9406b7f49245..76bc796beced8 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -493,7 +493,7 @@ The basic ``DateOffset`` takes the same arguments as .. ipython:: python - d = datetime(2008, 8, 18) + d = datetime(2008, 8, 18, 9, 0) d + relativedelta(months=4, days=5) We could have done the same thing with ``DateOffset``: @@ -568,10 +568,21 @@ particular day of the week: .. ipython:: python + d d + Week() d + Week(weekday=4) (d + Week(weekday=4)).weekday() + d - Week() + +``normalize`` option will be effective for addition and subtraction. + +.. ipython:: python + + d + Week(normalize=True) + d - Week(normalize=True) + + Another example is parameterizing ``YearEnd`` with the specific ending month: .. ipython:: python diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index d1fe287bf33be..57181b43df9f6 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -157,7 +157,7 @@ def isAnchored(self): return (self.n == 1) def copy(self): - return self.__class__(self.n, **self.kwds) + return self.__class__(self.n, normalize=self.normalize, **self.kwds) def _should_cache(self): return self.isAnchored() and self._cacheable @@ -251,34 +251,34 @@ def __sub__(self, other): if isinstance(other, datetime): raise TypeError('Cannot subtract datetime from offset.') elif type(other) == type(self): - return self.__class__(self.n - other.n, **self.kwds) + return self.__class__(self.n - other.n, normalize=self.normalize, **self.kwds) else: # pragma: no cover return NotImplemented def __rsub__(self, other): - return self.__class__(-self.n, **self.kwds) + other + return self.__class__(-self.n, normalize=self.normalize, **self.kwds) + other def __mul__(self, someInt): - return self.__class__(n=someInt * self.n, **self.kwds) + return self.__class__(n=someInt * self.n, normalize=self.normalize, **self.kwds) def __rmul__(self, someInt): return self.__mul__(someInt) 
def __neg__(self): - return self.__class__(-self.n, **self.kwds) + return self.__class__(-self.n, normalize=self.normalize, **self.kwds) @apply_wraps def rollback(self, dt): """Roll provided date backward to next offset only if not on offset""" if not self.onOffset(dt): - dt = dt - self.__class__(1, **self.kwds) + dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds) return dt @apply_wraps def rollforward(self, dt): """Roll provided date forward to next offset only if not on offset""" if not self.onOffset(dt): - dt = dt + self.__class__(1, **self.kwds) + dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds) return dt def onOffset(self, dt): diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index 1ef1bd184bdbc..9febec68bd458 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -361,6 +361,42 @@ def test_onOffset(self): date = datetime(dt.year, dt.month, dt.day) self.assert_(offset_n.onOffset(date)) + def test_add(self): + dt = datetime(2011, 1, 1, 9, 0) + + for offset in self.offset_types: + offset_s = self._get_offset(offset) + expected = self.expecteds[offset.__name__] + + result_dt = dt + offset_s + result_ts = Timestamp(dt) + offset_s + for result in [result_dt, result_ts]: + self.assertTrue(isinstance(result, Timestamp)) + self.assertEqual(result, expected) + + tm._skip_if_no_pytz() + for tz in self.timezones: + expected_localize = expected.tz_localize(tz) + result = Timestamp(dt, tz=tz) + offset_s + self.assert_(isinstance(result, Timestamp)) + self.assertEqual(result, expected_localize) + + # normalize=True + offset_s = self._get_offset(offset, normalize=True) + expected = Timestamp(expected.date()) + + result_dt = dt + offset_s + result_ts = Timestamp(dt) + offset_s + for result in [result_dt, result_ts]: + self.assertTrue(isinstance(result, Timestamp)) + self.assertEqual(result, expected) + + for tz in self.timezones: + expected_localize = 
expected.tz_localize(tz) + result = Timestamp(dt, tz=tz) + offset_s + self.assert_(isinstance(result, Timestamp)) + self.assertEqual(result, expected_localize) + class TestDateOffset(Base): _multiprocess_can_split_ = True diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 70b6b308b6b37..2fd71521b24d5 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -753,7 +753,10 @@ cdef class _Timestamp(datetime): elif isinstance(other, timedelta) or hasattr(other, 'delta'): nanos = _delta_to_nanoseconds(other) - return Timestamp(self.value + nanos, tz=self.tzinfo, offset=self.offset) + result = Timestamp(self.value + nanos, tz=self.tzinfo, offset=self.offset) + if getattr(other, 'normalize', False): + result = Timestamp(normalize_date(result)) + return result result = datetime.__add__(self, other) if isinstance(result, datetime):
Closes problem found in #7375. @jreback Is this for 0.15 (needs release note)?
https://api.github.com/repos/pandas-dev/pandas/pulls/7705
2014-07-09T14:10:24Z
2014-07-09T15:48:51Z
2014-07-09T15:48:51Z
2014-07-10T11:01:35Z
DOC: table keyword missing in the docstring for Series.plot() and DataFr...
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index c3189ae98f662..83fbc51787b20 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2067,6 +2067,10 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, position : float Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) + table : boolean, Series or DataFrame, default False + If True, draw a table using the data in the DataFrame and the data will + be transposed to meet matplotlib’s default layout. + If a Series or DataFrame is passed, use passed data to draw a table. kwds : keywords Options to pass to matplotlib plotting method @@ -2210,6 +2214,10 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None, position : float Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) + table : boolean, Series or DataFrame, default False + If True, draw a table using the data in the Series and the data will + be transposed to meet matplotlib’s default layout. + If a Series or DataFrame is passed, use passed data to draw a table. kwds : keywords Options to pass to matplotlib plotting method @@ -2795,7 +2803,7 @@ def table(ax, data, rowLabels=None, colLabels=None, elif isinstance(data, DataFrame): pass else: - raise ValueError('Input data must be dataframe or series') + raise ValueError('Input data must be DataFrame or Series') if rowLabels is None: rowLabels = data.index
...ame.plot()
https://api.github.com/repos/pandas-dev/pandas/pulls/7698
2014-07-08T21:01:42Z
2014-07-09T06:37:28Z
2014-07-09T06:37:28Z
2015-04-25T23:33:31Z
BUG/PERF: offsets.apply doesnt preserve nanosecond
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index c2d234b5a06c1..7ef8e1fac08d1 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -256,6 +256,9 @@ Bug Fixes - Bug in repeated timeseries line and area plot may result in ``ValueError`` or incorrect kind (:issue:`7733`) +- Bug in ``offsets.apply``, ``rollforward`` and ``rollback`` may reset nanosecond (:issue:`7697`) +- Bug in ``offsets.apply``, ``rollforward`` and ``rollback`` may raise ``AttributeError`` if ``Timestamp`` has ``dateutil`` tzinfo (:issue:`7697`) + - Bug in ``is_superperiod`` and ``is_subperiod`` cannot handle higher frequencies than ``S`` (:issue:`7760`, :issue:`7772`, :issue:`7803`) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 8f77f88910a3c..d2c9acedcee94 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -22,13 +22,14 @@ 'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd', 'LastWeekOfMonth', 'FY5253Quarter', 'FY5253', 'Week', 'WeekOfMonth', 'Easter', - 'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano'] + 'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano', + 'DateOffset'] # convert to/from datetime/timestamp to allow invalid Timestamp ranges to pass thru def as_timestamp(obj): + if isinstance(obj, Timestamp): + return obj try: - if isinstance(obj, Timestamp): - return obj return Timestamp(obj) except (OutOfBoundsDatetime): pass @@ -45,22 +46,46 @@ def apply_wraps(func): def wrapper(self, other): if other is tslib.NaT: return tslib.NaT - if type(other) == date: - other = datetime(other.year, other.month, other.day) - if isinstance(other, (np.datetime64, datetime)): + elif isinstance(other, (timedelta, Tick, DateOffset)): + # timedelta path + return func(self, other) + elif isinstance(other, (np.datetime64, datetime, date)): other = as_timestamp(other) tz = getattr(other, 'tzinfo', None) - result = func(self, other) + nano = getattr(other, 'nanosecond', 0) - if self.normalize: - result = 
tslib.normalize_date(result) + try: + result = func(self, other) + + if self.normalize: + # normalize_date returns normal datetime + result = tslib.normalize_date(result) + result = Timestamp(result) - if isinstance(other, Timestamp) and not isinstance(result, Timestamp): - result = as_timestamp(result) + # nanosecond may be deleted depending on offset process + if not self.normalize and nano != 0: + if not isinstance(self, Nano) and result.nanosecond != nano: + if result.tz is not None: + # convert to UTC + value = tslib.tz_convert_single(result.value, 'UTC', result.tz) + else: + value = result.value + result = Timestamp(value + nano) + + if tz is not None and result.tzinfo is None: + result = tslib._localize_pydatetime(result, tz) + + except OutOfBoundsDatetime: + result = func(self, as_datetime(other)) + + if self.normalize: + # normalize_date returns normal datetime + result = tslib.normalize_date(result) + + if tz is not None and result.tzinfo is None: + result = tslib._localize_pydatetime(result, tz) - if tz is not None and result.tzinfo is None: - result = result.tz_localize(tz) return result return wrapper @@ -144,7 +169,6 @@ def __init__(self, n=1, normalize=False, **kwds): @apply_wraps def apply(self, other): - other = as_datetime(other) if len(self.kwds) > 0: if self.n > 0: for i in range(self.n): @@ -152,9 +176,9 @@ def apply(self, other): else: for i in range(-self.n): other = other - self._offset - return as_timestamp(other) + return other else: - return as_timestamp(other + timedelta(self.n)) + return other + timedelta(self.n) def isAnchored(self): return (self.n == 1) @@ -270,16 +294,16 @@ def __rmul__(self, someInt): def __neg__(self): return self.__class__(-self.n, normalize=self.normalize, **self.kwds) - @apply_wraps def rollback(self, dt): """Roll provided date backward to next offset only if not on offset""" + dt = as_timestamp(dt) if not self.onOffset(dt): dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds) return dt - 
@apply_wraps def rollforward(self, dt): """Roll provided date forward to next offset only if not on offset""" + dt = as_timestamp(dt) if not self.onOffset(dt): dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds) return dt @@ -452,8 +476,7 @@ def apply(self, other): if self.offset: result = result + self.offset - - return as_timestamp(result) + return result elif isinstance(other, (timedelta, Tick)): return BDay(self.n, offset=self.offset + other, @@ -550,7 +573,6 @@ def apply(self, other): else: roll = 'backward' - # Distinguish input cases to enhance performance if isinstance(other, datetime): date_in = other np_dt = np.datetime64(date_in.date()) @@ -563,8 +585,7 @@ def apply(self, other): if self.offset: result = result + self.offset - - return as_timestamp(result) + return result elif isinstance(other, (timedelta, Tick)): return BDay(self.n, offset=self.offset + other, @@ -613,11 +634,11 @@ def apply(self, other): n = self.n _, days_in_month = tslib.monthrange(other.year, other.month) if other.day != days_in_month: - other = as_datetime(other) + relativedelta(months=-1, day=31) + other = other + relativedelta(months=-1, day=31) if n <= 0: n = n + 1 - other = as_datetime(other) + relativedelta(months=n, day=31) - return as_timestamp(other) + other = other + relativedelta(months=n, day=31) + return other def onOffset(self, dt): if self.normalize and not _is_normalized(dt): @@ -638,8 +659,7 @@ def apply(self, other): if other.day > 1 and n <= 0: # then roll forward if n<=0 n += 1 - other = as_datetime(other) + relativedelta(months=n, day=1) - return as_timestamp(other) + return other + relativedelta(months=n, day=1) def onOffset(self, dt): if self.normalize and not _is_normalized(dt): @@ -657,9 +677,7 @@ def isAnchored(self): @apply_wraps def apply(self, other): - n = self.n - wkday, days_in_month = tslib.monthrange(other.year, other.month) lastBDay = days_in_month - max(((wkday + days_in_month - 1) % 7) - 4, 0) @@ -668,11 +686,11 @@ def apply(self, 
other): n = n - 1 elif n <= 0 and other.day > lastBDay: n = n + 1 - other = as_datetime(other) + relativedelta(months=n, day=31) + other = other + relativedelta(months=n, day=31) if other.weekday() > 4: other = other - BDay() - return as_timestamp(other) + return other _prefix = 'BM' @@ -683,7 +701,6 @@ class BusinessMonthBegin(MonthOffset): @apply_wraps def apply(self, other): n = self.n - wkday, _ = tslib.monthrange(other.year, other.month) first = _get_firstbday(wkday) @@ -691,15 +708,15 @@ def apply(self, other): # as if rolled forward already n += 1 elif other.day < first and n > 0: - other = as_datetime(other) + timedelta(days=first - other.day) + other = other + timedelta(days=first - other.day) n -= 1 - other = as_datetime(other) + relativedelta(months=n) + other = other + relativedelta(months=n) wkday, _ = tslib.monthrange(other.year, other.month) first = _get_firstbday(wkday) result = datetime(other.year, other.month, first, other.hour, other.minute, other.second, other.microsecond) - return as_timestamp(result) + return result def onOffset(self, dt): if self.normalize and not _is_normalized(dt): @@ -746,30 +763,29 @@ def __init__(self, n=1, normalize=False, **kwds): self.kwds = kwds self.offset = kwds.get('offset', timedelta(0)) self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri') - self.cbday = CustomBusinessDay(n=self.n, normalize=normalize, **kwds) - self.m_offset = MonthEnd(normalize=normalize) + self.cbday = CustomBusinessDay(n=self.n, **kwds) + self.m_offset = MonthEnd() @apply_wraps def apply(self,other): n = self.n - dt_in = other # First move to month offset - cur_mend = self.m_offset.rollforward(dt_in) + cur_mend = self.m_offset.rollforward(other) # Find this custom month offset cur_cmend = self.cbday.rollback(cur_mend) - + # handle zero case. 
arbitrarily rollforward - if n == 0 and dt_in != cur_cmend: + if n == 0 and other != cur_cmend: n += 1 - if dt_in < cur_cmend and n >= 1: + if other < cur_cmend and n >= 1: n -= 1 - elif dt_in > cur_cmend and n <= -1: + elif other > cur_cmend and n <= -1: n += 1 new = cur_mend + n * MonthEnd() result = self.cbday.rollback(new) - return as_timestamp(result) + return result class CustomBusinessMonthBegin(BusinessMixin, MonthOffset): """ @@ -824,7 +840,7 @@ def apply(self,other): new = cur_mbegin + n * MonthBegin() result = self.cbday.rollforward(new) - return as_timestamp(result) + return result class Week(DateOffset): """ @@ -856,23 +872,22 @@ def isAnchored(self): def apply(self, other): base = other if self.weekday is None: - return as_timestamp(as_datetime(other) + self.n * self._inc) + return other + self.n * self._inc if self.n > 0: k = self.n otherDay = other.weekday() if otherDay != self.weekday: - other = as_datetime(other) + timedelta((self.weekday - otherDay) % 7) + other = other + timedelta((self.weekday - otherDay) % 7) k = k - 1 - other = as_datetime(other) + other = other for i in range(k): other = other + self._inc else: k = self.n otherDay = other.weekday() if otherDay != self.weekday: - other = as_datetime(other) + timedelta((self.weekday - otherDay) % 7) - other = as_datetime(other) + other = other + timedelta((self.weekday - otherDay) % 7) for i in range(-k): other = other - self._inc @@ -979,20 +994,14 @@ def apply(self, other): else: months = self.n + 1 - other = self.getOffsetOfMonth(as_datetime(other) + relativedelta(months=months, day=1)) + other = self.getOffsetOfMonth(other + relativedelta(months=months, day=1)) other = datetime(other.year, other.month, other.day, base.hour, base.minute, base.second, base.microsecond) - if getattr(other, 'tzinfo', None) is not None: - other = other.tzinfo.localize(other) return other def getOffsetOfMonth(self, dt): w = Week(weekday=self.weekday) - - d = datetime(dt.year, dt.month, 1) - if getattr(dt, 
'tzinfo', None) is not None: - d = dt.tzinfo.localize(d) - + d = datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo) d = w.rollforward(d) for i in range(self.week): @@ -1003,9 +1012,7 @@ def getOffsetOfMonth(self, dt): def onOffset(self, dt): if self.normalize and not _is_normalized(dt): return False - d = datetime(dt.year, dt.month, dt.day) - if getattr(dt, 'tzinfo', None) is not None: - d = dt.tzinfo.localize(d) + d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo) return d == self.getOffsetOfMonth(dt) @property @@ -1072,18 +1079,14 @@ def apply(self, other): else: months = self.n + 1 - return self.getOffsetOfMonth(as_datetime(other) + relativedelta(months=months, day=1)) + return self.getOffsetOfMonth(other + relativedelta(months=months, day=1)) def getOffsetOfMonth(self, dt): m = MonthEnd() - d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute, dt.second, dt.microsecond) - if getattr(dt, 'tzinfo', None) is not None: - d = dt.tzinfo.localize(d) - + d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute, + dt.second, dt.microsecond, tzinfo=dt.tzinfo) eom = m.rollforward(d) - w = Week(weekday=self.weekday) - return w.rollback(eom) def onOffset(self, dt): @@ -1175,13 +1178,11 @@ def apply(self, other): elif n <= 0 and other.day > lastBDay and monthsToGo == 0: n = n + 1 - other = as_datetime(other) + relativedelta(months=monthsToGo + 3 * n, day=31) - if getattr(base, 'tzinfo', None) is not None: - other = base.tzinfo.localize(other) + other = other + relativedelta(months=monthsToGo + 3 * n, day=31) + other = tslib._localize_pydatetime(other, base.tzinfo) if other.weekday() > 4: other = other - BDay() - - return as_timestamp(other) + return other def onOffset(self, dt): if self.normalize and not _is_normalized(dt): @@ -1219,8 +1220,6 @@ class BQuarterBegin(QuarterOffset): @apply_wraps def apply(self, other): n = self.n - other = as_datetime(other) - wkday, _ = tslib.monthrange(other.year, other.month) first = _get_firstbday(wkday) @@ -1244,9 +1243,7 @@ def 
apply(self, other): result = datetime(other.year, other.month, first, other.hour, other.minute, other.second, other.microsecond) - if getattr(other, 'tzinfo', None) is not None: - result = other.tzinfo.localize(result) - return as_timestamp(result) + return result class QuarterEnd(QuarterOffset): @@ -1272,12 +1269,9 @@ def isAnchored(self): @apply_wraps def apply(self, other): n = self.n - base = other other = datetime(other.year, other.month, other.day, other.hour, other.minute, other.second, other.microsecond) - other = as_datetime(other) - wkday, days_in_month = tslib.monthrange(other.year, other.month) monthsToGo = 3 - ((other.month - self.startingMonth) % 3) @@ -1288,9 +1282,7 @@ def apply(self, other): n = n - 1 other = other + relativedelta(months=monthsToGo + 3 * n, day=31) - if getattr(base, 'tzinfo', None) is not None: - other = base.tzinfo.localize(other) - return as_timestamp(other) + return other def onOffset(self, dt): if self.normalize and not _is_normalized(dt): @@ -1311,8 +1303,6 @@ def isAnchored(self): @apply_wraps def apply(self, other): n = self.n - other = as_datetime(other) - wkday, days_in_month = tslib.monthrange(other.year, other.month) monthsSince = (other.month - self.startingMonth) % 3 @@ -1326,7 +1316,7 @@ def apply(self, other): n = n + 1 other = other + relativedelta(months=3 * n - monthsSince, day=1) - return as_timestamp(other) + return other class YearOffset(DateOffset): @@ -1361,8 +1351,6 @@ class BYearEnd(YearOffset): @apply_wraps def apply(self, other): n = self.n - other = as_datetime(other) - wkday, days_in_month = tslib.monthrange(other.year, self.month) lastBDay = (days_in_month - max(((wkday + days_in_month - 1) % 7) - 4, 0)) @@ -1387,7 +1375,7 @@ def apply(self, other): if result.weekday() > 4: result = result - BDay() - return as_timestamp(result) + return result class BYearBegin(YearOffset): @@ -1399,8 +1387,6 @@ class BYearBegin(YearOffset): @apply_wraps def apply(self, other): n = self.n - other = as_datetime(other) - 
wkday, days_in_month = tslib.monthrange(other.year, self.month) first = _get_firstbday(wkday) @@ -1420,8 +1406,8 @@ def apply(self, other): other = other + relativedelta(years=years) wkday, days_in_month = tslib.monthrange(other.year, self.month) first = _get_firstbday(wkday) - return as_timestamp(datetime(other.year, self.month, first, other.hour, - other.minute, other.second, other.microsecond)) + return datetime(other.year, self.month, first, other.hour, + other.minute, other.second, other.microsecond) class YearEnd(YearOffset): @@ -1473,8 +1459,7 @@ def _rollf(date): else: # n == 0, roll forward result = _rollf(result) - - return as_timestamp(result) + return result def onOffset(self, dt): if self.normalize and not _is_normalized(dt): @@ -1490,15 +1475,15 @@ class YearBegin(YearOffset): @apply_wraps def apply(self, other): - def _increment(date): - year = date.year + def _increment(date, n): + year = date.year + n - 1 if date.month >= self.month: year += 1 return datetime(year, self.month, 1, date.hour, date.minute, date.second, date.microsecond) - def _decrement(date): - year = date.year + def _decrement(date, n): + year = date.year + n + 1 if date.month < self.month or (date.month == self.month and date.day == 1): year -= 1 @@ -1507,24 +1492,19 @@ def _decrement(date): def _rollf(date): if (date.month != self.month) or date.day > 1: - date = _increment(date) + date = _increment(date, 1) return date n = self.n result = other if n > 0: - while n > 0: - result = _increment(result) - n -= 1 + result = _increment(result, n) elif n < 0: - while n < 0: - result = _decrement(result) - n += 1 + result = _decrement(result, n) else: # n == 0, roll forward result = _rollf(result) - - return as_timestamp(result) + return result def onOffset(self, dt): if self.normalize and not _is_normalized(dt): @@ -1624,10 +1604,9 @@ def apply(self, other): datetime(other.year, self.startingMonth, 1)) next_year = self.get_year_end( datetime(other.year + 1, self.startingMonth, 1)) - if 
getattr(other, 'tzinfo', None) is not None: - prev_year = other.tzinfo.localize(prev_year) - cur_year = other.tzinfo.localize(cur_year) - next_year = other.tzinfo.localize(next_year) + prev_year = tslib._localize_pydatetime(prev_year, other.tzinfo) + cur_year = tslib._localize_pydatetime(cur_year, other.tzinfo) + next_year = tslib._localize_pydatetime(next_year, other.tzinfo) if n > 0: if other == prev_year: @@ -1686,9 +1665,7 @@ def get_year_end(self, dt): return self._get_year_end_last(dt) def get_target_month_end(self, dt): - target_month = datetime(dt.year, self.startingMonth, 1) - if getattr(dt, 'tzinfo', None) is not None: - target_month = dt.tzinfo.localize(target_month) + target_month = datetime(dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo) next_month_first_of = target_month + relativedelta(months=+1) return next_month_first_of + relativedelta(days=-1) @@ -1706,9 +1683,7 @@ def _get_year_end_nearest(self, dt): return backward def _get_year_end_last(self, dt): - current_year = datetime(dt.year, self.startingMonth, 1) - if getattr(dt, 'tzinfo', None) is not None: - current_year = dt.tzinfo.localize(current_year) + current_year = datetime(dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo) return current_year + self._offset_lwom @property @@ -1822,8 +1797,6 @@ def isAnchored(self): @apply_wraps def apply(self, other): base = other - other = as_datetime(other) - n = self.n if n > 0: @@ -1926,8 +1899,7 @@ def __init__(self, n=1, **kwds): def apply(self, other): currentEaster = easter(other.year) currentEaster = datetime(currentEaster.year, currentEaster.month, currentEaster.day) - if getattr(other, 'tzinfo', None) is not None: - currentEaster = other.tzinfo.localize(currentEaster) + currentEaster = tslib._localize_pydatetime(currentEaster, other.tzinfo) # NOTE: easter returns a datetime.date so we have to convert to type of other if self.n >= 0: @@ -2021,19 +1993,9 @@ def nanos(self): def apply(self, other): # Timestamp can handle tz and nano sec, thus no need 
to use apply_wraps - if type(other) == date: - other = datetime(other.year, other.month, other.day) - elif isinstance(other, (np.datetime64, datetime)): - other = as_timestamp(other) - - if isinstance(other, datetime): - result = other + self.delta - if self.normalize: - # normalize_date returns normal datetime - result = tslib.normalize_date(result) - return as_timestamp(result) - - elif isinstance(other, timedelta): + if isinstance(other, (datetime, np.datetime64, date)): + return as_timestamp(other) + self + if isinstance(other, timedelta): return other + self.delta elif isinstance(other, type(self)): return type(self)(self.n + other.n) @@ -2067,16 +2029,7 @@ def _delta_to_tick(delta): else: # pragma: no cover return Nano(nanos) - -def _delta_to_nanoseconds(delta): - if isinstance(delta, np.timedelta64): - return delta.astype('timedelta64[ns]').item() - elif isinstance(delta, Tick): - delta = delta.delta - - return (delta.days * 24 * 60 * 60 * 1000000 - + delta.seconds * 1000000 - + delta.microseconds) * 1000 +_delta_to_nanoseconds = tslib._delta_to_nanoseconds class Day(Tick): diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index 9febec68bd458..d99cfb254cc48 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -22,8 +22,8 @@ from pandas.tseries.tools import parse_time_string, _maybe_get_tz import pandas.tseries.offsets as offsets -from pandas.tslib import monthrange, OutOfBoundsDatetime, NaT -from pandas.lib import Timestamp +from pandas.tslib import NaT, Timestamp +import pandas.tslib as tslib from pandas.util.testing import assertRaisesRegexp import pandas.util.testing as tm from pandas.tseries.offsets import BusinessMonthEnd, CacheableOffset, \ @@ -39,7 +39,7 @@ def test_monthrange(): import calendar for y in range(2000, 2013): for m in range(1, 13): - assert monthrange(y, m) == calendar.monthrange(y, m) + assert tslib.monthrange(y, m) == calendar.monthrange(y, m) #### @@ 
-99,6 +99,9 @@ class Base(tm.TestCase): skip_np_u1p7 = [offsets.CustomBusinessDay, offsets.CDay, offsets.CustomBusinessMonthBegin, offsets.CustomBusinessMonthEnd, offsets.Nano] + timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', + 'dateutil/Asia/Tokyo', 'dateutil/US/Pacific'] + @property def offset_types(self): if _np_version_under1p7: @@ -118,6 +121,8 @@ def _get_offset(self, klass, value=1, normalize=False): klass = klass(n=value, week=1, weekday=5, normalize=normalize) elif klass is Week: klass = klass(n=value, weekday=5, normalize=normalize) + elif klass is DateOffset: + klass = klass(days=value, normalize=normalize) else: try: klass = klass(value, normalize=normalize) @@ -138,7 +143,18 @@ def test_apply_out_of_range(self): result = Timestamp('20080101') + offset self.assertIsInstance(result, datetime) - except (OutOfBoundsDatetime): + self.assertIsNone(result.tzinfo) + + tm._skip_if_no_pytz() + tm._skip_if_no_dateutil() + # Check tz is preserved + for tz in self.timezones: + t = Timestamp('20080101', tz=tz) + result = t + offset + self.assertIsInstance(result, datetime) + self.assertEqual(t.tzinfo, result.tzinfo) + + except (tslib.OutOfBoundsDatetime): raise except (ValueError, KeyError) as e: raise nose.SkipTest("cannot create out_of_range offset: {0} {1}".format(str(self).split('.')[-1],e)) @@ -152,6 +168,7 @@ def setUp(self): # are applied to 2011/01/01 09:00 (Saturday) # used for .apply and .rollforward self.expecteds = {'Day': Timestamp('2011-01-02 09:00:00'), + 'DateOffset': Timestamp('2011-01-02 09:00:00'), 'BusinessDay': Timestamp('2011-01-03 09:00:00'), 'CustomBusinessDay': Timestamp('2011-01-03 09:00:00'), 'CustomBusinessMonthEnd': Timestamp('2011-01-31 09:00:00'), @@ -181,8 +198,6 @@ def setUp(self): 'Micro': Timestamp('2011-01-01 09:00:00.000001'), 'Nano': Timestamp(np.datetime64('2011-01-01T09:00:00.000000001Z'))} - self.timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern'] - def test_return_type(self): for offset in self.offset_types: offset = 
self._get_offset(offset) @@ -204,37 +219,48 @@ def _check_offsetfunc_works(self, offset, funcname, dt, expected, func = getattr(offset_s, funcname) result = func(dt) - self.assert_(isinstance(result, Timestamp)) + self.assertTrue(isinstance(result, Timestamp)) self.assertEqual(result, expected) result = func(Timestamp(dt)) - self.assert_(isinstance(result, Timestamp)) + self.assertTrue(isinstance(result, Timestamp)) self.assertEqual(result, expected) + # test nano second is preserved + result = func(Timestamp(dt) + Nano(5)) + self.assertTrue(isinstance(result, Timestamp)) + if normalize is False: + self.assertEqual(result, expected + Nano(5)) + else: + self.assertEqual(result, expected) + if isinstance(dt, np.datetime64): # test tz when input is datetime or Timestamp return tm._skip_if_no_pytz() - import pytz + tm._skip_if_no_dateutil() + for tz in self.timezones: expected_localize = expected.tz_localize(tz) + tz_obj = _maybe_get_tz(tz) + dt_tz = tslib._localize_pydatetime(dt, tz_obj) - dt_tz = pytz.timezone(tz).localize(dt) result = func(dt_tz) - self.assert_(isinstance(result, Timestamp)) + self.assertTrue(isinstance(result, Timestamp)) self.assertEqual(result, expected_localize) result = func(Timestamp(dt, tz=tz)) - self.assert_(isinstance(result, Timestamp)) + self.assertTrue(isinstance(result, Timestamp)) self.assertEqual(result, expected_localize) - def _check_nanofunc_works(self, offset, funcname, dt, expected): - offset = self._get_offset(offset) - func = getattr(offset, funcname) - - t1 = Timestamp(dt) - self.assertEqual(func(t1), expected) + # test nano second is preserved + result = func(Timestamp(dt, tz=tz) + Nano(5)) + self.assertTrue(isinstance(result, Timestamp)) + if normalize is False: + self.assertEqual(result, expected_localize + Nano(5)) + else: + self.assertEqual(result, expected_localize) def test_apply(self): sdt = datetime(2011, 1, 1, 9, 0) @@ -243,21 +269,18 @@ def test_apply(self): for offset in self.offset_types: for dt in [sdt, ndt]: 
expected = self.expecteds[offset.__name__] - if offset == Nano: - self._check_nanofunc_works(offset, 'apply', dt, expected) - else: - self._check_offsetfunc_works(offset, 'apply', dt, expected) + self._check_offsetfunc_works(offset, 'apply', dt, expected) - expected = Timestamp(expected.date()) - self._check_offsetfunc_works(offset, 'apply', dt, expected, - normalize=True) + expected = Timestamp(expected.date()) + self._check_offsetfunc_works(offset, 'apply', dt, expected, + normalize=True) def test_rollforward(self): expecteds = self.expecteds.copy() # result will not be changed if the target is on the offset no_changes = ['Day', 'MonthBegin', 'YearBegin', 'Week', 'Hour', 'Minute', - 'Second', 'Milli', 'Micro', 'Nano'] + 'Second', 'Milli', 'Micro', 'Nano', 'DateOffset'] for n in no_changes: expecteds[n] = Timestamp('2011/01/01 09:00') @@ -267,6 +290,7 @@ def test_rollforward(self): norm_expected[k] = Timestamp(norm_expected[k].date()) normalized = {'Day': Timestamp('2011-01-02 00:00:00'), + 'DateOffset': Timestamp('2011-01-02 00:00:00'), 'MonthBegin': Timestamp('2011-02-01 00:00:00'), 'YearBegin': Timestamp('2012-01-01 00:00:00'), 'Week': Timestamp('2011-01-08 00:00:00'), @@ -283,13 +307,10 @@ def test_rollforward(self): for offset in self.offset_types: for dt in [sdt, ndt]: expected = expecteds[offset.__name__] - if offset == Nano: - self._check_nanofunc_works(offset, 'rollforward', dt, expected) - else: - self._check_offsetfunc_works(offset, 'rollforward', dt, expected) - expected = norm_expected[offset.__name__] - self._check_offsetfunc_works(offset, 'rollforward', dt, expected, - normalize=True) + self._check_offsetfunc_works(offset, 'rollforward', dt, expected) + expected = norm_expected[offset.__name__] + self._check_offsetfunc_works(offset, 'rollforward', dt, expected, + normalize=True) def test_rollback(self): expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'), @@ -314,7 +335,7 @@ def test_rollback(self): # result will not be changed if the 
target is on the offset for n in ['Day', 'MonthBegin', 'YearBegin', 'Week', 'Hour', 'Minute', - 'Second', 'Milli', 'Micro', 'Nano']: + 'Second', 'Milli', 'Micro', 'Nano', 'DateOffset']: expecteds[n] = Timestamp('2011/01/01 09:00') # but be changed when normalize=True @@ -323,6 +344,7 @@ def test_rollback(self): norm_expected[k] = Timestamp(norm_expected[k].date()) normalized = {'Day': Timestamp('2010-12-31 00:00:00'), + 'DateOffset': Timestamp('2010-12-31 00:00:00'), 'MonthBegin': Timestamp('2010-12-01 00:00:00'), 'YearBegin': Timestamp('2010-01-01 00:00:00'), 'Week': Timestamp('2010-12-25 00:00:00'), @@ -339,27 +361,24 @@ def test_rollback(self): for offset in self.offset_types: for dt in [sdt, ndt]: expected = expecteds[offset.__name__] - if offset == Nano: - self._check_nanofunc_works(offset, 'rollback', dt, expected) - else: - self._check_offsetfunc_works(offset, 'rollback', dt, expected) + self._check_offsetfunc_works(offset, 'rollback', dt, expected) - expected = norm_expected[offset.__name__] - self._check_offsetfunc_works(offset, 'rollback', - dt, expected, normalize=True) + expected = norm_expected[offset.__name__] + self._check_offsetfunc_works(offset, 'rollback', + dt, expected, normalize=True) def test_onOffset(self): for offset in self.offset_types: dt = self.expecteds[offset.__name__] offset_s = self._get_offset(offset) - self.assert_(offset_s.onOffset(dt)) + self.assertTrue(offset_s.onOffset(dt)) # when normalize=True, onOffset checks time is 00:00:00 offset_n = self._get_offset(offset, normalize=True) - self.assert_(not offset_n.onOffset(dt)) + self.assertFalse(offset_n.onOffset(dt)) date = datetime(dt.year, dt.month, dt.day) - self.assert_(offset_n.onOffset(date)) + self.assertTrue(offset_n.onOffset(date)) def test_add(self): dt = datetime(2011, 1, 1, 9, 0) @@ -2482,6 +2501,13 @@ def test_offset(self): datetime(2005, 12, 30): datetime(2006, 1, 1), datetime(2005, 12, 31): datetime(2006, 1, 1), })) + tests.append((YearBegin(3), + {datetime(2008, 1, 
1): datetime(2011, 1, 1), + datetime(2008, 6, 30): datetime(2011, 1, 1), + datetime(2008, 12, 31): datetime(2011, 1, 1), + datetime(2005, 12, 30): datetime(2008, 1, 1), + datetime(2005, 12, 31): datetime(2008, 1, 1), })) + tests.append((YearBegin(-1), {datetime(2007, 1, 1): datetime(2006, 1, 1), datetime(2007, 1, 15): datetime(2007, 1, 1), @@ -2509,12 +2535,25 @@ def test_offset(self): datetime(2007, 12, 15): datetime(2008, 4, 1), datetime(2012, 1, 31): datetime(2012, 4, 1), })) + tests.append((YearBegin(4, month=4), + {datetime(2007, 4, 1): datetime(2011, 4, 1), + datetime(2007, 4, 15): datetime(2011, 4, 1), + datetime(2007, 3, 1): datetime(2010, 4, 1), + datetime(2007, 12, 15): datetime(2011, 4, 1), + datetime(2012, 1, 31): datetime(2015, 4, 1), })) + tests.append((YearBegin(-1, month=4), {datetime(2007, 4, 1): datetime(2006, 4, 1), datetime(2007, 3, 1): datetime(2006, 4, 1), datetime(2007, 12, 15): datetime(2007, 4, 1), datetime(2012, 1, 31): datetime(2011, 4, 1), })) + tests.append((YearBegin(-3, month=4), + {datetime(2007, 4, 1): datetime(2004, 4, 1), + datetime(2007, 3, 1): datetime(2004, 4, 1), + datetime(2007, 12, 15): datetime(2005, 4, 1), + datetime(2012, 1, 31): datetime(2009, 4, 1), })) + for offset, cases in tests: for base, expected in compat.iteritems(cases): assertEq(offset, base, expected) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index c06d8a3ba9a05..655b92cfe70f3 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1051,6 +1051,26 @@ cdef inline void _localize_tso(_TSObject obj, object tz): obj.tzinfo = tz +def _localize_pydatetime(object dt, object tz): + ''' + Take a datetime/Timestamp in UTC and localizes to timezone tz. 
+ ''' + if tz is None: + return dt + elif isinstance(dt, Timestamp): + return dt.tz_localize(tz) + elif tz == 'UTC' or tz is UTC: + return UTC.localize(dt) + + elif _treat_tz_as_pytz(tz): + # datetime.replace may return incorrect result in pytz + return tz.localize(dt) + elif _treat_tz_as_dateutil(tz): + return dt.replace(tzinfo=tz) + else: + raise ValueError(type(tz), tz) + + def get_timezone(tz): return _get_zone(tz)
Main Fix is to preserve nanosecond info which can lost during `offset.apply`, but it also includes: - Support dateutil timezone - Little performance improvement. Even though v0.14.1 should take longer than v0.14.0 because perf test in v0.14 doesn't perform timestamp conversion which was fixed in #7502. NOTE: This caches `Tick.delta` because it was calculated 3 times repeatedly, but does it cause any side effect? ### Before ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- timeseries_year_incr | 0.0164 | 0.0103 | 1.5846 | timeseries_year_apply | 0.0153 | 0.0094 | 1.6356 | timeseries_day_incr | 0.0187 | 0.0053 | 3.5075 | timeseries_day_apply | 0.0164 | 0.0033 | 4.9048 | Target [d0076db] : PERF: Improve index.min and max perf Base [da0f7ae] : RLS: 0.14.0 final ``` ### After the fix ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- timeseries_year_incr | 0.0150 | 0.0087 | 1.7339 | timeseries_year_apply | 0.0126 | 0.0073 | 1.7283 | timeseries_day_incr | 0.0130 | 0.0053 | 2.4478 | timeseries_day_apply | 0.0107 | 0.0033 | 3.2143 | Target [64dd021] : BUG: offsets.apply doesnt preserve nanosecond Base [da0f7ae] : RLS: 0.14.0 final ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7697
2014-07-08T15:55:25Z
2014-07-25T15:10:47Z
2014-07-25T15:10:47Z
2014-07-25T20:42:15Z
TST/CLN: centralize numpy < 1.7 skips
diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py index a6bd94153c3bd..f6f705201bf18 100644 --- a/pandas/io/tests/test_json/test_pandas.py +++ b/pandas/io/tests/test_json/test_pandas.py @@ -5,7 +5,7 @@ import numpy as np import nose -from pandas import Series, DataFrame, DatetimeIndex, Timestamp, _np_version_under1p7 +from pandas import Series, DataFrame, DatetimeIndex, Timestamp import pandas as pd read_json = pd.read_json @@ -601,8 +601,7 @@ def test_url(self): self.assertEqual(result[c].dtype, 'datetime64[ns]') def test_timedelta(self): - if _np_version_under1p7: - raise nose.SkipTest("numpy < 1.7") + tm._skip_if_not_numpy17_friendly() from datetime import timedelta converter = lambda x: pd.to_timedelta(x,unit='ms') diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index dd30527b1f82d..d0d1b02577f89 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -2061,11 +2061,7 @@ def compare(a,b): def test_append_with_timezones_dateutil(self): from datetime import timedelta - - try: - import dateutil - except ImportError: - raise nose.SkipTest + tm._skip_if_no_dateutil() # use maybe_get_tz instead of dateutil.tz.gettz to handle the windows filename issues. 
from pandas.tslib import maybe_get_tz @@ -2186,8 +2182,7 @@ def setTZ(tz): setTZ(orig_tz) def test_append_with_timedelta(self): - if _np_version_under1p7: - raise nose.SkipTest("requires numpy >= 1.7") + tm._skip_if_not_numpy17_friendly() # GH 3577 # append timedelta diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index aa69fb964d947..122b80c3f0076 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -509,8 +509,7 @@ def test_date_and_index(self): def test_timedelta(self): # see #6921 - if _np_version_under1p7: - raise nose.SkipTest("test only valid in numpy >= 1.7") + tm._skip_if_not_numpy17_friendly() df = to_timedelta(Series(['00:00:01', '00:00:03'], name='foo')).to_frame() with tm.assert_produces_warning(UserWarning): @@ -659,7 +658,7 @@ def test_not_reflect_all_tables(self): self.conn.execute(qry) qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);""" self.conn.execute(qry) - + with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. 
warnings.simplefilter("always") diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index c2fb7017ee4d6..832671521c815 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -198,8 +198,7 @@ def setUp(self): self.not_valid_objs = [ o for o in self.objs if not o._allow_index_ops ] def test_ops(self): - if _np_version_under1p7: - raise nose.SkipTest("test only valid in numpy >= 1.7") + tm._skip_if_not_numpy17_friendly() for op in ['max','min']: for o in self.objs: result = getattr(o,op)() diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 884a2c1a1ae8e..5d785df355aa3 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -80,13 +80,6 @@ def has_expanded_repr(df): return True return False -def skip_if_np_version_under1p7(): - if _np_version_under1p7: - import nose - - raise nose.SkipTest('numpy >= 1.7 required') - - class TestDataFrameFormatting(tm.TestCase): _multiprocess_can_split_ = True @@ -2736,7 +2729,7 @@ def test_format(self): class TestRepr_timedelta64(tm.TestCase): @classmethod def setUpClass(cls): - skip_if_np_version_under1p7() + tm._skip_if_not_numpy17_friendly() def test_legacy(self): delta_1d = pd.to_timedelta(1, unit='D') @@ -2784,7 +2777,7 @@ def test_long(self): class TestTimedelta64Formatter(tm.TestCase): @classmethod def setUpClass(cls): - skip_if_np_version_under1p7() + tm._skip_if_not_numpy17_friendly() def test_mixed(self): x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D') diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index d3bf3cfe32926..1cada8efb6c6f 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -32,8 +32,7 @@ import pandas.core.format as fmt import pandas.core.datetools as datetools from pandas import (DataFrame, Index, Series, notnull, isnull, - MultiIndex, DatetimeIndex, Timestamp, date_range, read_csv, - _np_version_under1p7) + MultiIndex, DatetimeIndex, Timestamp, date_range, read_csv) import 
pandas as pd from pandas.parser import CParserError from pandas.util.misc import is_little_endian @@ -3772,8 +3771,7 @@ def test_operators_timedelta64(self): self.assertTrue(df['off2'].dtype == 'timedelta64[ns]') def test_datetimelike_setitem_with_inference(self): - if _np_version_under1p7: - raise nose.SkipTest("numpy < 1.7") + tm._skip_if_not_numpy17_friendly() # GH 7592 # assignment of timedeltas with NaT @@ -13036,6 +13034,7 @@ def test_select_dtypes_exclude_include(self): tm.assert_frame_equal(r, e) def test_select_dtypes_not_an_attr_but_still_valid_dtype(self): + tm._skip_if_not_numpy17_friendly() df = DataFrame({'a': list('abc'), 'b': list(range(1, 4)), 'c': np.arange(3, 6).astype('u1'), diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 82447635473a3..044d4054755ba 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import (Index, Series, DataFrame, Panel, - isnull, notnull,date_range, _np_version_under1p7) + isnull, notnull,date_range) from pandas.core.index import Index, MultiIndex import pandas.core.common as com @@ -160,8 +160,7 @@ def f(): self.assertRaises(ValueError, lambda : not obj1) def test_numpy_1_7_compat_numeric_methods(self): - if _np_version_under1p7: - raise nose.SkipTest("numpy < 1.7") + tm._skip_if_not_numpy17_friendly() # GH 4435 # numpy in 1.7 tries to pass addtional arguments to pandas functions diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 23a0f39ef3547..6fb88eb5597a9 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -32,11 +32,6 @@ from pandas import _np_version_under1p7 -def _skip_if_need_numpy_1_7(): - if _np_version_under1p7: - raise nose.SkipTest('numpy >= 1.7 required') - - class TestIndex(tm.TestCase): _multiprocess_can_split_ = True @@ -340,7 +335,7 @@ def test_asof(self): tm.assert_isinstance(self.dateIndex.asof(d), Timestamp) def test_nanosecond_index_access(self): - 
_skip_if_need_numpy_1_7() + tm._skip_if_not_numpy17_friendly() s = Series([Timestamp('20130101')]).values.view('i8')[0] r = DatetimeIndex([s + 50 + i for i in range(100)]) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index fae403ebb653d..d08f7e1d547c8 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2721,8 +2721,7 @@ def test_timedelta64_operations_with_integers(self): self.assertRaises(TypeError, sop, s2.values) def test_timedelta64_conversions(self): - if _np_version_under1p7: - raise nose.SkipTest("cannot use 2 argument form of timedelta64 conversions with numpy < 1.7") + tm._skip_if_not_numpy17_friendly() startdate = Series(date_range('2013-01-01', '2013-01-03')) enddate = Series(date_range('2013-03-01', '2013-03-03')) @@ -2835,8 +2834,7 @@ def run_ops(ops, get_ser, test_ser): dt1 + td1 def test_ops_datetimelike_align(self): - if _np_version_under1p7: - raise nose.SkipTest("timedelta broken in np < 1.7") + tm._skip_if_not_numpy17_friendly() # GH 7500 # datetimelike ops need to align @@ -2899,8 +2897,7 @@ def test_timedelta64_functions(self): assert_series_equal(result, expected) def test_timedelta_fillna(self): - if _np_version_under1p7: - raise nose.SkipTest("timedelta broken in np 1.6.1") + tm._skip_if_not_numpy17_friendly() #GH 3371 s = Series([Timestamp('20130101'), Timestamp('20130101'), @@ -3107,8 +3104,7 @@ def test_bfill(self): assert_series_equal(ts.bfill(), ts.fillna(method='bfill')) def test_sub_of_datetime_from_TimeSeries(self): - if _np_version_under1p7: - raise nose.SkipTest("timedelta broken in np 1.6.1") + tm._skip_if_not_numpy17_friendly() from pandas.tseries.timedeltas import _possibly_cast_to_timedelta from datetime import datetime diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py index f2239bba520e7..4601ad0784562 100644 --- a/pandas/tools/tests/test_merge.py +++ b/pandas/tools/tests/test_merge.py @@ -812,8 +812,7 @@ def test_join_append_timedeltas(self): 
# timedelta64 issues with join/merge # GH 5695 - if _np_version_under1p7: - raise nose.SkipTest("numpy < 1.7") + tm._skip_if_not_numpy17_friendly() d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)} df = DataFrame(columns=list('dt')) @@ -2005,9 +2004,7 @@ def test_concat_datetime64_block(self): def test_concat_timedelta64_block(self): # not friendly for < 1.7 - if _np_version_under1p7: - raise nose.SkipTest("numpy < 1.7") - + tm._skip_if_not_numpy17_friendly() from pandas import to_timedelta rng = to_timedelta(np.arange(10),unit='s') diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py index 9089ca85ac3bb..37371b5828c8c 100644 --- a/pandas/tseries/tests/test_frequencies.py +++ b/pandas/tseries/tests/test_frequencies.py @@ -137,8 +137,7 @@ def test_microsecond(self): self._check_tick(timedelta(microseconds=1), 'U') def test_nanosecond(self): - if _np_version_under1p7: - raise nose.SkipTest("requires numpy >= 1.7 to run") + tm._skip_if_not_numpy17_friendly() self._check_tick(np.timedelta64(1, 'ns'), 'N') def _check_tick(self, base_delta, code): diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index e51ec45fe1c79..1ef1bd184bdbc 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -180,7 +180,7 @@ def setUp(self): 'Milli': Timestamp('2011-01-01 09:00:00.001000'), 'Micro': Timestamp('2011-01-01 09:00:00.000001'), 'Nano': Timestamp(np.datetime64('2011-01-01T09:00:00.000000001Z'))} - + self.timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern'] def test_return_type(self): @@ -2782,8 +2782,8 @@ def test_Microsecond(): def test_NanosecondGeneric(): - if _np_version_under1p7: - raise nose.SkipTest('numpy >= 1.7 required') + tm._skip_if_not_numpy17_friendly() + timestamp = Timestamp(datetime(2010, 1, 1)) assert timestamp.nanosecond == 0 @@ -2795,8 +2795,7 @@ def test_NanosecondGeneric(): def test_Nanosecond(): - if _np_version_under1p7: 
- raise nose.SkipTest('numpy >= 1.7 required') + tm._skip_if_not_numpy17_friendly() timestamp = Timestamp(datetime(2010, 1, 1)) assertEq(Nano(), timestamp, timestamp + np.timedelta64(1, 'ns')) diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index 8e841632d88d3..9d85c599c840c 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import (Index, Series, DataFrame, Timestamp, isnull, notnull, - bdate_range, date_range, _np_version_under1p7) + bdate_range, date_range) import pandas.core.common as com from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long from pandas import compat, to_timedelta, tslib @@ -15,14 +15,10 @@ from pandas.util.testing import (assert_series_equal, assert_frame_equal, assert_almost_equal, - ensure_clean) + ensure_clean, + _skip_if_not_numpy17_friendly) import pandas.util.testing as tm -def _skip_if_numpy_not_friendly(): - # not friendly for < 1.7 - if _np_version_under1p7: - raise nose.SkipTest("numpy < 1.7") - class TestTimedeltas(tm.TestCase): _multiprocess_can_split_ = True @@ -30,7 +26,7 @@ def setUp(self): pass def test_numeric_conversions(self): - _skip_if_numpy_not_friendly() + _skip_if_not_numpy17_friendly() self.assertEqual(ct(0), np.timedelta64(0,'ns')) self.assertEqual(ct(10), np.timedelta64(10,'ns')) @@ -42,14 +38,14 @@ def test_numeric_conversions(self): self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]')) def test_timedelta_conversions(self): - _skip_if_numpy_not_friendly() + _skip_if_not_numpy17_friendly() self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]')) self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]')) self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]')) def test_short_format_converters(self): - _skip_if_numpy_not_friendly() + 
_skip_if_not_numpy17_friendly() def conv(v): return v.astype('m8[ns]') @@ -97,7 +93,7 @@ def conv(v): self.assertRaises(ValueError, ct, 'foo') def test_full_format_converters(self): - _skip_if_numpy_not_friendly() + _skip_if_not_numpy17_friendly() def conv(v): return v.astype('m8[ns]') @@ -120,13 +116,13 @@ def conv(v): self.assertRaises(ValueError, ct, '- 1days, 00') def test_nat_converters(self): - _skip_if_numpy_not_friendly() + _skip_if_not_numpy17_friendly() self.assertEqual(to_timedelta('nat',box=False).astype('int64'), tslib.iNaT) self.assertEqual(to_timedelta('nan',box=False).astype('int64'), tslib.iNaT) def test_to_timedelta(self): - _skip_if_numpy_not_friendly() + _skip_if_not_numpy17_friendly() def conv(v): return v.astype('m8[ns]') @@ -235,7 +231,7 @@ def testit(unit, transform): self.assertRaises(ValueError, lambda : to_timedelta(1,unit='foo')) def test_to_timedelta_via_apply(self): - _skip_if_numpy_not_friendly() + _skip_if_not_numpy17_friendly() # GH 5458 expected = Series([np.timedelta64(1,'s')]) @@ -246,7 +242,7 @@ def test_to_timedelta_via_apply(self): tm.assert_series_equal(result, expected) def test_timedelta_ops(self): - _skip_if_numpy_not_friendly() + _skip_if_not_numpy17_friendly() # GH4984 # make sure ops return timedeltas @@ -275,7 +271,7 @@ def test_timedelta_ops(self): tm.assert_almost_equal(result, expected) def test_timedelta_ops_scalar(self): - _skip_if_numpy_not_friendly() + _skip_if_not_numpy17_friendly() # GH 6808 base = pd.to_datetime('20130101 09:01:12.123456') @@ -309,7 +305,7 @@ def test_timedelta_ops_scalar(self): self.assertEqual(result, expected_sub) def test_to_timedelta_on_missing_values(self): - _skip_if_numpy_not_friendly() + _skip_if_not_numpy17_friendly() # GH5438 timedelta_NaT = np.timedelta64('NaT') @@ -328,7 +324,7 @@ def test_to_timedelta_on_missing_values(self): self.assertEqual(actual.astype('int64'), timedelta_NaT.astype('int64')) def test_timedelta_ops_with_missing_values(self): - _skip_if_numpy_not_friendly() + 
_skip_if_not_numpy17_friendly() # setup s1 = pd.to_timedelta(Series(['00:00:01'])) @@ -407,7 +403,7 @@ def test_timedelta_ops_with_missing_values(self): assert_frame_equal(actual, dfn) def test_apply_to_timedelta(self): - _skip_if_numpy_not_friendly() + _skip_if_not_numpy17_friendly() timedelta_NaT = pd.to_timedelta('NaT') diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index f353f08114a2c..1614261542733 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -770,11 +770,13 @@ def test_index_cast_datetime64_other_units(self): self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all()) def test_index_astype_datetime64(self): - idx = Index([datetime(2012, 1, 1)], dtype=object) - + # valid only under 1.7! if not _np_version_under1p7: raise nose.SkipTest("test only valid in numpy < 1.7") + idx = Index([datetime(2012, 1, 1)], dtype=object) + casted = idx.astype(np.dtype('M8[D]')) + casted = idx.astype(np.dtype('M8[D]')) expected = DatetimeIndex(idx.values) tm.assert_isinstance(casted, DatetimeIndex) @@ -2680,9 +2682,7 @@ def assert_index_parameters(self, index): assert index.inferred_freq == '40960N' def test_ns_index(self): - - if _np_version_under1p7: - raise nose.SkipTest + tm._skip_if_not_numpy17_friendly() nsamples = 400 ns = int(1e9 / 24414) diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index 122bb93a878ee..a47d6a178f8b2 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -264,8 +264,7 @@ def test_parsing_timezone_offsets(self): class TestTimestampNsOperations(tm.TestCase): def setUp(self): - if _np_version_under1p7: - raise nose.SkipTest('numpy >= 1.7 required') + tm._skip_if_not_numpy17_friendly() self.timestamp = Timestamp(datetime.datetime.utcnow()) def assert_ns_timedelta(self, modified_timestamp, expected_value): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 
9c49014a47da7..0d7ea77e96955 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -41,7 +41,8 @@ from pandas.tseries.index import DatetimeIndex from pandas.tseries.period import PeriodIndex -from pandas import _testing +from pandas import _testing, _np_version_under1p7 + from pandas.io.common import urlopen @@ -209,6 +210,12 @@ def setUpClass(cls): cls.setUpClass = setUpClass return cls +def _skip_if_not_numpy17_friendly(): + # not friendly for < 1.7 + if _np_version_under1p7: + import nose + raise nose.SkipTest("numpy >= 1.7 is required") + def _skip_if_no_scipy(): try: import scipy.stats
TST: skip on older numpy for (GH7694) closes #7694
https://api.github.com/repos/pandas-dev/pandas/pulls/7696
2014-07-08T14:15:02Z
2014-07-08T15:40:09Z
2014-07-08T15:40:09Z
2014-07-08T15:40:09Z
BUG: Fix conditional for underlying price in io.data.options.
diff --git a/pandas/io/data.py b/pandas/io/data.py index 13ced745b7b3f..0b1601b143be0 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -736,9 +736,8 @@ def _get_option_data(self, month, year, expiry, name): " found".format(table_loc, ntables)) option_data = _parse_options_data(tables[table_loc]) - option_data = self._process_data(option_data) option_data['Type'] = name[:-1] - option_data.set_index(['Strike', 'Expiry', 'Type', 'Symbol'], inplace=True) + option_data = self._process_data(option_data, name[:-1]) if month == CUR_MONTH and year == CUR_YEAR: setattr(self, name, option_data) @@ -859,8 +858,7 @@ def get_near_stock_price(self, above_below=2, call=True, put=False, month=None, year=None, expiry=None): """ ***Experimental*** - Cuts the data frame opt_df that is passed in to only take - options that are near the current stock price. + Returns a data frame of options that are near the current stock price. Parameters ---------- @@ -889,7 +887,6 @@ def get_near_stock_price(self, above_below=2, call=True, put=False, Note: Format of returned data frame is dependent on Yahoo and may change. 
""" - year, month, expiry = self._try_parse_dates(year, month, expiry) to_ret = Series({'calls': call, 'puts': put}) to_ret = to_ret[to_ret].index @@ -897,26 +894,31 @@ def get_near_stock_price(self, above_below=2, call=True, put=False, data = {} for nam in to_ret: - if month: - m1 = _two_char_month(month) - name = nam + m1 + str(year)[2:] + df = self._get_option_data(month, year, expiry, nam) + data[nam] = self.chop_data(df, above_below, self.underlying_price) + + return concat([data[nam] for nam in to_ret]).sortlevel() + + def chop_data(self, df, above_below=2, underlying_price=None): + """Returns a data frame only options that are near the current stock price.""" + if not underlying_price: try: - df = getattr(self, name) + underlying_price = self.underlying_price except AttributeError: - meth_name = 'get_{0}_data'.format(nam[:-1]) - df = getattr(self, meth_name)(expiry=expiry) + underlying_price = np.nan - if self.underlying_price: - start_index = np.where(df.index.get_level_values('Strike') - > self.underlying_price)[0][0] + if underlying_price is not np.nan: + start_index = np.where(df.index.get_level_values('Strike') + > underlying_price)[0][0] - get_range = slice(start_index - above_below, + get_range = slice(start_index - above_below, start_index + above_below + 1) - chop = df[get_range].dropna(how='all') - data[nam] = chop + df = df[get_range].dropna(how='all') + + return df + - return concat([data[nam] for nam in to_ret]).sortlevel() @staticmethod def _try_parse_dates(year, month, expiry): @@ -1048,7 +1050,7 @@ def get_forward_data(self, months, call=True, put=False, near=False, frame = self.get_near_stock_price(call=call, put=put, above_below=above_below, month=m2, year=y2) - frame = self._process_data(frame) + frame = self._process_data(frame, name[:-1]) all_data.append(frame) @@ -1178,7 +1180,7 @@ def _parse_url(self, url): return root - def _process_data(self, frame): + def _process_data(self, frame, type): """ Adds columns for Expiry, IsNonstandard 
(ie: deliverable is not 100 shares) and Tag (the tag indicating what is actually deliverable, None if standard). @@ -1195,5 +1197,7 @@ def _process_data(self, frame): frame['Underlying_Price'] = self.underlying_price frame["Quote_Time"] = self.quote_time frame.rename(columns={'Open Int': 'Open_Int'}, inplace=True) + frame['Type'] = type + frame.set_index(['Strike', 'Expiry', 'Type', 'Symbol'], inplace=True) return frame diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index 8b5a81f050ced..15ebeba941ccd 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -250,6 +250,9 @@ def setUpClass(cls): cls.html2 = os.path.join(cls.dirpath, 'yahoo_options2.html') cls.root1 = cls.aapl._parse_url(cls.html1) cls.root2 = cls.aapl._parse_url(cls.html2) + cls.tables1 = cls.aapl._parse_option_page_from_yahoo(cls.root1) + cls.unprocessed_data1 = web._parse_options_data(cls.tables1[cls.aapl._TABLE_LOC['puts']]) + cls.data1 = cls.aapl._process_data(cls.unprocessed_data1, 'put') @classmethod def tearDownClass(cls): @@ -324,6 +327,13 @@ def test_sample_page_price_quote_time1(self): self.assertIsInstance(price, (int, float, complex)) self.assertIsInstance(quote_time, (datetime, Timestamp)) + def test_chop(self): + #regression test for #7625 + self.aapl.chop_data(self.data1, above_below=2, underlying_price=np.nan) + chopped = self.aapl.chop_data(self.data1, above_below=2, underlying_price=300) + self.assertIsInstance(chopped, DataFrame) + self.assertTrue(len(chopped) > 1) + @network def test_sample_page_price_quote_time2(self): #Tests the weekday quote time format @@ -334,10 +344,7 @@ def test_sample_page_price_quote_time2(self): @network def test_sample_page_chg_float(self): #Tests that numeric columns with comma's are appropriately dealt with - tables = self.aapl._parse_option_page_from_yahoo(self.root1) - data = web._parse_options_data(tables[self.aapl._TABLE_LOC['puts']]) - option_data = self.aapl._process_data(data) - 
self.assertEqual(option_data['Chg'].dtype, 'float64') + self.assertEqual(self.data1['Chg'].dtype, 'float64') class TestOptionsWarnings(tm.TestCase):
Refactor and regression test. Fixes #7685
https://api.github.com/repos/pandas-dev/pandas/pulls/7688
2014-07-08T05:31:10Z
2014-07-08T23:33:47Z
2014-07-08T23:33:47Z
2014-07-09T04:38:52Z
PERF: better perf on min/max on indices not containing NaT for DatetimeIndex/PeriodsIndex
diff --git a/pandas/core/base.py b/pandas/core/base.py index 1ba5061cd7e9a..585db0f49d8bf 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -6,7 +6,7 @@ from pandas.core import common as com import pandas.core.nanops as nanops import pandas.tslib as tslib - +from pandas.util.decorators import cache_readonly class StringMixin(object): @@ -392,6 +392,11 @@ def _box_values(self, values): import pandas.lib as lib return lib.map_infer(values, self._box_func) + @cache_readonly + def hasnans(self): + """ return if I have any nans; enables various perf speedups """ + return (self.asi8 == tslib.iNaT).any() + @property def asobject(self): from pandas.core.index import Index @@ -408,11 +413,18 @@ def min(self, axis=None): Overridden ndarray.min to return an object """ try: - mask = self.asi8 == tslib.iNaT - if mask.any(): + i8 = self.asi8 + + # quick check + if len(i8) and self.is_monotonic: + if i8[0] != tslib.iNaT: + return self._box_func(i8[0]) + + if self.hasnans: + mask = i8 == tslib.iNaT min_stamp = self[~mask].asi8.min() else: - min_stamp = self.asi8.min() + min_stamp = i8.min() return self._box_func(min_stamp) except ValueError: return self._na_value @@ -422,11 +434,18 @@ def max(self, axis=None): Overridden ndarray.max to return an object """ try: - mask = self.asi8 == tslib.iNaT - if mask.any(): + i8 = self.asi8 + + # quick check + if len(i8) and self.is_monotonic: + if i8[-1] != tslib.iNaT: + return self._box_func(i8[-1]) + + if self.hasnans: + mask = i8 == tslib.iNaT max_stamp = self[~mask].asi8.max() else: - max_stamp = self.asi8.max() + max_stamp = i8.max() return self._box_func(max_stamp) except ValueError: return self._na_value diff --git a/pandas/core/index.py b/pandas/core/index.py index 51ddacd00af08..262305a335d46 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2072,7 +2072,7 @@ def __contains__(self, other): try: # if other is a sequence this throws a ValueError - return np.isnan(other) and self._hasnans + return 
np.isnan(other) and self.hasnans except ValueError: try: return len(other) <= 1 and _try_get_item(other) in self @@ -2109,7 +2109,7 @@ def _isnan(self): return np.isnan(self.values) @cache_readonly - def _hasnans(self): + def hasnans(self): return self._isnan.any() @cache_readonly diff --git a/pandas/lib.pyx b/pandas/lib.pyx index a064e714e7f89..7690cc4819dd5 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -958,7 +958,7 @@ def is_lexsorted(list list_of_arrays): @cython.boundscheck(False) @cython.wraparound(False) def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner, - object closed='left'): + object closed='left', bint hasnans=0): """ Int64 (datetime64) version of generic python version in groupby.py """ @@ -968,9 +968,9 @@ def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner, int64_t l_bin, r_bin, nat_count bint right_closed = closed == 'right' - mask = values == iNaT nat_count = 0 - if mask.any(): + if hasnans: + mask = values == iNaT nat_count = np.sum(mask) values = values[~mask] diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index 1ee7664f7bb9a..01aff164d8384 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -174,7 +174,7 @@ def _get_time_bins(self, ax): binner, bin_edges = self._adjust_bin_edges(binner, ax_values) # general version, knowing nothing about relative frequencies - bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed) + bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed, hasnans=ax.hasnans) if self.closed == 'right': labels = binner @@ -188,7 +188,7 @@ def _get_time_bins(self, ax): elif not trimmed: labels = labels[:-1] - if (ax_values == tslib.iNaT).any(): + if ax.hasnans: binner = binner.insert(0, tslib.NaT) labels = labels.insert(0, tslib.NaT)
closes #7633 close to what it was in 0.14.0 key was to not keep recomputing whether an index `hasnans` every time we need it (it is now cached). further `min/max` are optimized if the index is monotonic ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- timeseries_timestamp_downsample_mean | 4.5697 | 7.8034 | 0.5856 | dataframe_resample_min_string | 1.8380 | 2.5294 | 0.7266 | dataframe_resample_min_numpy | 1.8580 | 2.5463 | 0.7297 | dataframe_resample_max_numpy | 1.8887 | 2.5803 | 0.7320 | dataframe_resample_max_string | 1.9130 | 2.5553 | 0.7486 | dataframe_resample_mean_numpy | 2.6687 | 3.3340 | 0.8004 | dataframe_resample_mean_string | 2.7773 | 3.3080 | 0.8396 | timeseries_period_downsample_mean | 12.2183 | 11.6010 | 1.0532 | Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [d2d30c7] : PERF: better perf on min/max on indices not containing NaT for DatetimeIndex/PeriodIndex Base [e060616] : DOC: minor corrections in v0.14.1 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7684
2014-07-07T18:43:34Z
2014-07-07T19:24:50Z
2014-07-07T19:24:50Z
2014-07-07T19:24:50Z
TST/COMPAT: numpy master compat with timedelta type coercion
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index 82f05a0de4588..122bb93a878ee 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -98,7 +98,7 @@ def test_tz(self): self.assertEqual(conv.hour, 19) def test_barely_oob_dts(self): - one_us = np.timedelta64(1) + one_us = np.timedelta64(1).astype('timedelta64[us]') # By definition we can't go out of bounds in [ns], so we # convert the datetime64s to [us] so we can go out of bounds
https://api.github.com/repos/pandas-dev/pandas/pulls/7681
2014-07-07T14:15:46Z
2014-07-07T15:11:58Z
2014-07-07T15:11:58Z
2014-07-07T15:11:58Z
FIX: to_sql takes the boolean column as text column
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 3159bbfc34e7d..6292868dae669 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -280,3 +280,5 @@ Bug Fixes - Bug in ``pandas.core.strings.str_contains`` does not properly match in a case insensitive fashion when ``regex=False`` and ``case=False`` (:issue:`7505`) - Bug in ``expanding_cov``, ``expanding_corr``, ``rolling_cov``, and ``rolling_corr`` for two arguments with mismatched index (:issue:`7512`) + +- Bug in ``to_sql`` taking the boolean column as text column (:issue:`7678`) \ No newline at end of file diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 252d807d1dc3c..9a479afd86cad 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -733,7 +733,7 @@ def _sqlalchemy_type(self, arr_or_dtype): elif com.is_integer_dtype(arr_or_dtype): # TODO: Refine integer size. return BigInteger - elif com.is_bool(arr_or_dtype): + elif com.is_bool_dtype(arr_or_dtype): return Boolean return Text diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index f3ff84120197a..aa69fb964d947 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -191,6 +191,26 @@ def _load_test1_data(self): self.test_frame1 = DataFrame(data, columns=columns) + def _load_test2_data(self): + df = DataFrame(dict(A=[4, 1, 3, 6], + B=['asd', 'gsq', 'ylt', 'jkl'], + C=[1.1, 3.1, 6.9, 5.3], + D=[False, True, True, False], + E=['1990-11-22', '1991-10-26', '1993-11-26', '1995-12-12'])) + df['E'] = to_datetime(df['E']) + + self.test_frame3 = df + + def _load_test3_data(self): + columns = ['index', 'A', 'B'] + data = [( + '2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670), + ('2000-01-04 00:00:00', -29, -0.0412318367011), + ('2000-01-05 00:00:00', 20000, 0.731167677815), + ('2000-01-06 00:00:00', -290867, 1.56762092543)] + + self.test_frame3 = DataFrame(data, columns=columns) + def _load_raw_sql(self): self.drop_table('types_test_data') 
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor]) @@ -331,6 +351,8 @@ def setUp(self): self.conn = self.connect() self._load_iris_data() self._load_test1_data() + self._load_test2_data() + self._load_test3_data() self._load_raw_sql() def test_read_sql_iris(self): @@ -391,6 +413,13 @@ def test_to_sql_append(self): self.assertEqual( num_rows, num_entries, "not the same number of rows as entries") + def test_to_sql_type_mapping(self): + sql.to_sql(self.test_frame3, 'test_frame5', + self.conn, flavor='sqlite', index=False) + result = sql.read_sql("SELECT * FROM test_frame5", self.conn) + + tm.assert_frame_equal(self.test_frame3, result) + def test_to_sql_series(self): s = Series(np.arange(5, dtype='int64'), name='series') sql.to_sql(s, "test_series", self.conn, flavor='sqlite', index=False) @@ -651,35 +680,23 @@ class TestSQLLegacyApi(_TestSQLApi): def connect(self, database=":memory:"): return sqlite3.connect(database) - def _load_test2_data(self): - columns = ['index', 'A', 'B'] - data = [( - '2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670), - ('2000-01-04 00:00:00', -29, -0.0412318367011), - ('2000-01-05 00:00:00', 20000, 0.731167677815), - ('2000-01-06 00:00:00', -290867, 1.56762092543)] - - self.test_frame2 = DataFrame(data, columns=columns) - def test_sql_open_close(self): # Test if the IO in the database still work if the connection closed # between the writing and reading (as in many real situations). 
- self._load_test2_data() - with tm.ensure_clean() as name: conn = self.connect(name) - sql.to_sql(self.test_frame2, "test_frame2_legacy", conn, + sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, flavor="sqlite", index=False) conn.close() conn = self.connect(name) - result = sql.read_sql_query("SELECT * FROM test_frame2_legacy;", + result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn) conn.close() - tm.assert_frame_equal(self.test_frame2, result) + tm.assert_frame_equal(self.test_frame3, result) def test_read_sql_delegate(self): iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
In the original code, `com.is_bool(arr_or_dtype)` checks whether `arr_or_dtype` is a boolean value instead of a boolean dtype. A new function `is_bool_dtype` is added to `pandas.core.common` to fix this bug.
https://api.github.com/repos/pandas-dev/pandas/pulls/7678
2014-07-07T09:47:10Z
2014-07-07T17:46:41Z
2014-07-07T17:46:41Z
2014-07-07T20:07:52Z
TST: skip buggy tests on debian (GH6270, GH7664)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index dece7be5fbbdf..fae403ebb653d 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1,5 +1,6 @@ # pylint: disable-msg=E1101,W0612 +import sys from datetime import datetime, timedelta import operator import string @@ -5541,6 +5542,11 @@ def test_isin_with_i8(self): #------------------------------------------------------------------------------ # TimeSeries-specific def test_cummethods_bool(self): + # GH 6270 + # looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2 + if _np_version_under1p7 and sys.version_info[0] == 3 and sys.version_info[1] == 2: + raise nose.SkipTest("failure of GH6270 on numpy < 1.7 and py 3.2") + def cummin(x): return np.minimum.accumulate(x) diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index f7aaf3e273b40..0bdba3751b6fd 100644 --- a/pandas/tseries/tests/test_plotting.py +++ b/pandas/tseries/tests/test_plotting.py @@ -132,7 +132,10 @@ def check_format_of_first_point(ax, expected_string): first_line = ax.get_lines()[0] first_x = first_line.get_xdata()[0].ordinal first_y = first_line.get_ydata()[0] - self.assertEqual(expected_string, ax.format_coord(first_x, first_y)) + try: + self.assertEqual(expected_string, ax.format_coord(first_x, first_y)) + except (ValueError): + raise nose.SkipTest("skipping test because issue forming test comparison GH7664") annual = Series(1, index=date_range('2014-01-01', periods=3, freq='A-DEC')) check_format_of_first_point(annual.plot(), 't = 2014 y = 1.000000')
closes #6270 closes #7664
https://api.github.com/repos/pandas-dev/pandas/pulls/7675
2014-07-06T17:43:50Z
2014-07-07T16:00:22Z
2014-07-07T16:00:22Z
2014-07-07T16:01:01Z
DOC: remove extra spaces from option descriptions
diff --git a/pandas/core/config.py b/pandas/core/config.py index 9b74ef0d9d3c0..a16b32d5dd185 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -334,8 +334,8 @@ def __doc__(self): Parameters ---------- -pat : str/regex - If specified only options matching `prefix*` will be reset. +pat : str/regex + If specified only options matching `prefix*` will be reset. Note: partial matches are supported for convenience, but unless you use the full option name (e.g. x.y.z.option_name), your code may break in future versions if new options with similar names are introduced. @@ -368,7 +368,7 @@ class option_context(object): Context manager to temporarily set options in the `with` statement context. You need to invoke as ``option_context(pat, val, [(pat, val), ...])``. - + Examples -------- @@ -628,20 +628,21 @@ def _build_option_description(k): o = _get_registered_option(k) d = _get_deprecated_option(k) - s = u('%s : ') % k - if o: - s += u('[default: %s] [currently: %s]') % (o.defval, - _get_option(k, True)) + s = u('%s ') % k if o.doc: - s += '\n '.join(o.doc.strip().split('\n')) + s += '\n'.join(o.doc.strip().split('\n')) else: - s += 'No description available.\n' + s += 'No description available.' + + if o: + s += u('\n [default: %s] [currently: %s]') % (o.defval, + _get_option(k, True)) if d: s += u('\n\t(Deprecated') s += (u(', use `%s` instead.') % d.rkey if d.rkey else '') - s += u(')\n') + s += u(')') s += '\n\n' return s
There are already 4 spaces in the description strings in config_init.py, so no need to add some more. This caused the descriptions to be longer than 79 characters, and so line wrapping in the terminal. Closes #6838. Plus, moved the default and current values to the last line of the description as proposed by @jseabold Example output now is: ``` display.max_colwidth : int The maximum width in characters of a column in the repr of a pandas data structure. When the column overflows, a "..." placeholder is embedded in the output. [default: 50] [currently: 50] display.max_info_columns : int max_info_columns is used in DataFrame.info method to decide if per column information will be printed. [default: 100] [currently: 100] ``` Previous in 0.14 this was: ``` display.max_colwidth : [default: 50] [currently: 50]: int The maximum width in characters of a column in the repr of a pandas data structure. When the column overflows, a "..." placeholder is embedded in the output. display.max_info_columns : [default: 100] [currently: 100]: int max_info_columns is used in DataFrame.info method to decide if per column information will be printed. ``` Before 0.14 it was worse (as reported in #6838), but I already improved it a bit some time ago.
https://api.github.com/repos/pandas-dev/pandas/pulls/7674
2014-07-06T14:29:18Z
2014-07-07T07:09:55Z
2014-07-07T07:09:55Z
2014-07-07T07:10:04Z
PERF: improve resample perf
diff --git a/pandas/core/base.py b/pandas/core/base.py index b06b0856d5909..1ba5061cd7e9a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -4,6 +4,9 @@ from pandas import compat import numpy as np from pandas.core import common as com +import pandas.core.nanops as nanops +import pandas.tslib as tslib + class StringMixin(object): @@ -236,13 +239,11 @@ def _wrap_access_object(self, obj): def max(self): """ The maximum value of the object """ - import pandas.core.nanops - return pandas.core.nanops.nanmax(self.values) + return nanops.nanmax(self.values) def min(self): """ The minimum value of the object """ - import pandas.core.nanops - return pandas.core.nanops.nanmin(self.values) + return nanops.nanmin(self.values) def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True): @@ -406,31 +407,29 @@ def min(self, axis=None): """ Overridden ndarray.min to return an object """ - import pandas.tslib as tslib - mask = self.asi8 == tslib.iNaT - masked = self[~mask] - if len(masked) == 0: - return self._na_value - elif self.is_monotonic: - return masked[0] - else: - min_stamp = masked.asi8.min() + try: + mask = self.asi8 == tslib.iNaT + if mask.any(): + min_stamp = self[~mask].asi8.min() + else: + min_stamp = self.asi8.min() return self._box_func(min_stamp) + except ValueError: + return self._na_value def max(self, axis=None): """ Overridden ndarray.max to return an object """ - import pandas.tslib as tslib - mask = self.asi8 == tslib.iNaT - masked = self[~mask] - if len(masked) == 0: - return self._na_value - elif self.is_monotonic: - return masked[-1] - else: - max_stamp = masked.asi8.max() + try: + mask = self.asi8 == tslib.iNaT + if mask.any(): + max_stamp = self[~mask].asi8.max() + else: + max_stamp = self.asi8.max() return self._box_func(max_stamp) + except ValueError: + return self._na_value @property def _formatter_func(self): diff --git a/pandas/lib.pyx b/pandas/lib.pyx index 89e681e6f1c90..a064e714e7f89 100644 --- 
a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -965,12 +965,14 @@ def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner, cdef: Py_ssize_t lenidx, lenbin, i, j, bc, vc ndarray[int64_t] bins - int64_t l_bin, r_bin + int64_t l_bin, r_bin, nat_count bint right_closed = closed == 'right' mask = values == iNaT - nat_count = values[mask].size - values = values[~mask] + nat_count = 0 + if mask.any(): + nat_count = np.sum(mask) + values = values[~mask] lenidx = len(values) lenbin = len(binner) @@ -991,17 +993,22 @@ def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner, bc = 0 # bin count # linear scan - for i in range(0, lenbin - 1): - l_bin = binner[i] - r_bin = binner[i+1] - - # count values in current bin, advance to next bin - while j < lenidx and (values[j] < r_bin or - (right_closed and values[j] == r_bin)): - j += 1 - - bins[bc] = j - bc += 1 + if right_closed: + for i in range(0, lenbin - 1): + r_bin = binner[i+1] + # count values in current bin, advance to next bin + while j < lenidx and values[j] <= r_bin: + j += 1 + bins[bc] = j + bc += 1 + else: + for i in range(0, lenbin - 1): + r_bin = binner[i+1] + # count values in current bin, advance to next bin + while j < lenidx and values[j] < r_bin: + j += 1 + bins[bc] = j + bc += 1 if nat_count > 0: # shift bins by the number of NaT diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py index 4098ac06c2da2..842be5a1645bf 100644 --- a/pandas/src/generate_code.py +++ b/pandas/src/generate_code.py @@ -1584,7 +1584,7 @@ def group_mean_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, for i in range(ngroups): for j in range(K): count = nobs[i, j] - if nobs[i, j] == 0: + if count == 0: out[i, j] = nan else: out[i, j] = sumx[i, j] / count diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index bcb68ded6fda7..d1fe287bf33be 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -27,6 +27,8 @@ # convert to/from datetime/timestamp to allow invalid 
Timestamp ranges to pass thru def as_timestamp(obj): try: + if isinstance(obj, Timestamp): + return obj return Timestamp(obj) except (OutOfBoundsDatetime): pass @@ -2014,9 +2016,21 @@ def delta(self): def nanos(self): return _delta_to_nanoseconds(self.delta) - @apply_wraps def apply(self, other): - if isinstance(other, (datetime, timedelta)): + # Timestamp can handle tz and nano sec, thus no need to use apply_wraps + if type(other) == date: + other = datetime(other.year, other.month, other.day) + elif isinstance(other, (np.datetime64, datetime)): + other = as_timestamp(other) + + if isinstance(other, datetime): + result = other + self.delta + if self.normalize: + # normalize_date returns normal datetime + result = tslib.normalize_date(result) + return as_timestamp(result) + + elif isinstance(other, timedelta): return other + self.delta elif isinstance(other, type(self)): return type(self)(self.n + other.n) diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index 059a6bfd06719..1ee7664f7bb9a 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -152,7 +152,8 @@ def _get_time_bins(self, ax): binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name) return binner, [], labels - first, last = _get_range_edges(ax, self.freq, closed=self.closed, + first, last = ax.min(), ax.max() + first, last = _get_range_edges(first, last, self.freq, closed=self.closed, base=self.base) tz = ax.tz binner = labels = DatetimeIndex(freq=self.freq, @@ -163,7 +164,7 @@ def _get_time_bins(self, ax): # a little hack trimmed = False - if (len(binner) > 2 and binner[-2] == ax.max() and + if (len(binner) > 2 and binner[-2] == last and self.closed == 'right'): binner = binner[:-1] @@ -353,11 +354,10 @@ def _take_new_index(obj, indexer, new_index, axis=0): raise NotImplementedError -def _get_range_edges(axis, offset, closed='left', base=0): +def _get_range_edges(first, last, offset, closed='left', base=0): if isinstance(offset, 
compat.string_types): offset = to_offset(offset) - first, last = axis.min(), axis.max() if isinstance(offset, Tick): day_nanos = _delta_to_nanoseconds(timedelta(1)) # #1165 diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 090b49bde68a6..70b6b308b6b37 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -3134,14 +3134,21 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end): else: relation = START - for i in range(n): - if arr[i] == iNaT: - result[i] = iNaT - continue - val = func(arr[i], relation, &finfo) - if val == INT32_MIN: - raise ValueError("Unable to convert to desired frequency.") - result[i] = val + mask = arr == iNaT + if mask.any(): # NaT process + for i in range(n): + val = arr[i] + if val != iNaT: + val = func(val, relation, &finfo) + if val == INT32_MIN: + raise ValueError("Unable to convert to desired frequency.") + result[i] = val + else: + for i in range(n): + val = func(arr[i], relation, &finfo) + if val == INT32_MIN: + raise ValueError("Unable to convert to desired frequency.") + result[i] = val return result
Related to #7633. It gets better than the result attached #7633, but still slower more than 1.2 times compared to 1.4.0 Modified: - Avoid every time module `import` in `Index.max/min` - Avoid duplicated `max` call from `resample/_get_time_bins` and `_get_range_edges`. - Optimize `lib/generate_bins_dt64` and `tslib/period_asfreq_arr`. Remaining bottlenecks are `NaT` masking performed in `lib/generate_bins_dt64` and `tslib/period_asfreq_arr`. Is there any better way to do that? ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- dataframe_resample_mean_numpy | 4.9963 | 3.7940 | 1.3169 | dataframe_resample_mean_string | 5.0424 | 3.8280 | 1.3172 | dataframe_resample_max_numpy | 4.1796 | 3.0069 | 1.3900 | dataframe_resample_min_numpy | 4.2127 | 2.9987 | 1.4049 | dataframe_resample_min_string | 4.1687 | 2.9490 | 1.4136 | dataframe_resample_max_string | 4.3443 | 2.9283 | 1.4835 | timeseries_timestamp_downsample_mean | 16.1959 | 8.6366 | 1.8753 | timeseries_period_downsample_mean | 47.6096 | 19.7030 | 2.4164 | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [54fb875] : PERF: Improve index.min and max perf Base [da0f7ae] : RLS: 0.14.0 final ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7673
2014-07-05T23:49:20Z
2014-07-07T13:12:20Z
2014-07-07T13:12:20Z
2014-07-09T12:37:40Z
Add some documentation on gotchas related to pytz updates #7620
diff --git a/doc/source/io.rst b/doc/source/io.rst index bc58b04de4473..7d16d9309021d 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2911,6 +2911,8 @@ Furthermore ``ptrepack in.h5 out.h5`` will *repack* the file to allow you to reuse previously deleted space. Aalternatively, one can simply remove the file and write again, or use the ``copy`` method. +.. _io.hdf5-notes: + Notes & Caveats ~~~~~~~~~~~~~~~ @@ -2933,6 +2935,13 @@ Notes & Caveats ``tables``. The sizes of a string based indexing column (e.g. *columns* or *minor_axis*) are determined as the maximum size of the elements in that axis or by passing the parameter + - Be aware that timezones (e.g., ``pytz.timezone('US/Eastern')``) + are not necessarily equal across timezone versions. So if data is + localized to a specific timezone in the HDFStore using one version + of a timezone library and that data is updated with another version, the data + will be converted to UTC since these timezones are not considered + equal. Either use the same version of timezone library or use ``tz_convert`` with + the updated timezone definition. .. warning:: diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 795bbca673f77..a75e943d7cec0 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1342,7 +1342,14 @@ tz-aware data to another time zone: Be wary of conversions between libraries. For some zones ``pytz`` and ``dateutil`` have different definitions of the zone. This is more of a problem for unusual timezones than for - 'standard' zones like ``US/Eastern``. + 'standard' zones like ``US/Eastern``. + +.. warning:: + + Be aware that a timezone definition across versions of timezone libraries may not + be considered equal. This may cause problems when working with stored data that + is localized using one version and operated on with a different version. + See :ref:`here<io.hdf5-notes>` for how to handle such a situation. Under the hood, all timestamps are stored in UTC. 
Scalar values from a ``DatetimeIndex`` with a time zone will have their fields (day, hour, minute)
closes #7620
https://api.github.com/repos/pandas-dev/pandas/pulls/7672
2014-07-05T21:27:39Z
2014-07-06T14:45:15Z
2014-07-06T14:45:14Z
2014-07-06T19:02:51Z
DOC: remove mention of TimeSeries in docs
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index adcf2fca9b4c5..9221f2685d79b 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -577,10 +577,8 @@ row-wise. For example: df - df.iloc[0] -In the special case of working with time series data, if the Series is a -TimeSeries (which it will be automatically if the index contains datetime -objects), and the DataFrame index also contains dates, the broadcasting will be -column-wise: +In the special case of working with time series data, and the DataFrame index +also contains dates, the broadcasting will be column-wise: .. ipython:: python :okwarning: diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 20762e3fc039f..1fc8488e92fde 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -207,9 +207,9 @@ properties. Here are the pandas equivalents: Frequency conversion ~~~~~~~~~~~~~~~~~~~~ -Frequency conversion is implemented using the ``resample`` method on TimeSeries -and DataFrame objects (multiple time series). ``resample`` also works on panels -(3D). Here is some code that resamples daily data to monthly: +Frequency conversion is implemented using the ``resample`` method on Series +and DataFrame objects with a DatetimeIndex or PeriodIndex. ``resample`` also +works on panels (3D). Here is some code that resamples daily data to montly: .. ipython:: python @@ -369,4 +369,3 @@ just a thin layer around the ``QTableView``. 
mw = MainWidget() mw.show() app.exec_() - diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 49a788def2854..b1addddc2121d 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -9,7 +9,7 @@ Package overview :mod:`pandas` consists of the following things * A set of labeled array data structures, the primary of which are - Series/TimeSeries and DataFrame + Series and DataFrame * Index objects enabling both simple axis indexing and multi-level / hierarchical axis indexing * An integrated group by engine for aggregating and transforming data sets @@ -32,7 +32,6 @@ Data structures at a glance :widths: 15, 20, 50 1, Series, "1D labeled homogeneously-typed array" - 1, TimeSeries, "Series with index containing datetimes" 2, DataFrame, "General 2D labeled, size-mutable tabular structure with potentially heterogeneously-typed columns" 3, Panel, "General 3D labeled, also size-mutable array" diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index b69b523d9c908..ce1035e91391a 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1008,7 +1008,7 @@ Time series-related instance methods Shifting / lagging ~~~~~~~~~~~~~~~~~~ -One may want to *shift* or *lag* the values in a TimeSeries back and forward in +One may want to *shift* or *lag* the values in a time series back and forward in time. The method for this is ``shift``, which is available on all of the pandas objects. @@ -1026,7 +1026,7 @@ The shift method accepts an ``freq`` argument which can accept a ts.shift(5, freq='BM') Rather than changing the alignment of the data and the index, ``DataFrame`` and -``TimeSeries`` objects also have a ``tshift`` convenience method that changes +``Series`` objects also have a ``tshift`` convenience method that changes all the dates in the index by a specified number of offsets: .. 
ipython:: python @@ -1569,7 +1569,7 @@ time zones using ``tz_convert``: rng_berlin[5] rng_eastern[5].tz_convert('Europe/Berlin') -Localization of Timestamps functions just like DatetimeIndex and TimeSeries: +Localization of Timestamps functions just like DatetimeIndex and Series: .. ipython:: python @@ -1577,8 +1577,8 @@ Localization of Timestamps functions just like DatetimeIndex and TimeSeries: rng[5].tz_localize('Asia/Shanghai') -Operations between TimeSeries in different time zones will yield UTC -TimeSeries, aligning the data on the UTC timestamps: +Operations between Series in different time zones will yield UTC +Series, aligning the data on the UTC timestamps: .. ipython:: python
As a Series with a DatetimeIndex is no longer presented as a `TimeSeries`, I think we also should not longer mention it in the docs as a 'seperate object', so removed the last few mentions.
https://api.github.com/repos/pandas-dev/pandas/pulls/7671
2014-07-05T14:17:52Z
2015-05-15T08:10:13Z
2015-05-15T08:10:13Z
2015-06-02T19:26:59Z
Update docs to use display.width instead of deprecated line_width.
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index 44bff4e5a8885..7c43a03e68013 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -630,19 +630,19 @@ default: DataFrame(randn(3, 12)) -You can change how much to print on a single row by setting the ``line_width`` +You can change how much to print on a single row by setting the ``display.width`` option: .. ipython:: python - set_option('line_width', 40) # default is 80 + set_option('display.width', 40) # default is 80 DataFrame(randn(3, 12)) .. ipython:: python :suppress: - reset_option('line_width') + reset_option('display.width') You can also disable this feature via the ``expand_frame_repr`` option. This will print the table in one block.
https://api.github.com/repos/pandas-dev/pandas/pulls/7669
2014-07-05T07:52:42Z
2014-07-05T09:05:37Z
2014-07-05T09:05:37Z
2014-07-05T09:05:37Z
BUG: windows failure on GH7667
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 280c4073b0f94..d387cb647d8c2 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -348,6 +348,9 @@ def _setitem_with_indexer(self, indexer, value): "with a different length than the value" ) + # make sure we have an ndarray + value = getattr(value,'values',value).ravel() + # we can directly set the series here # as we select a slice indexer on the mi idx = index._convert_slice_indexer(idx)
https://api.github.com/repos/pandas-dev/pandas/pulls/7668
2014-07-04T17:23:44Z
2014-07-04T17:23:48Z
2014-07-04T17:23:48Z
2014-07-22T18:40:51Z
BUG: Bug in multi-index slice setting, related GH3738
diff --git a/pandas/core/index.py b/pandas/core/index.py index 525d17c7612a7..51ddacd00af08 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -523,6 +523,10 @@ def _convert_slice_indexer_getitem(self, key, is_index_slice=False): def _convert_slice_indexer(self, key, typ=None): """ convert a slice indexer. disallow floats in the start/stop/step """ + # if we are not a slice, then we are done + if not isinstance(key, slice): + return key + # validate iloc if typ == 'iloc': @@ -2008,6 +2012,11 @@ def _convert_scalar_indexer(self, key, typ=None): def _convert_slice_indexer(self, key, typ=None): """ convert a slice indexer, by definition these are labels unless we are iloc """ + + # if we are not a slice, then we are done + if not isinstance(key, slice): + return key + if typ == 'iloc': return super(Float64Index, self)._convert_slice_indexer(key, typ=typ) diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 1a4da63a135a2..0e962800fef08 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1883,6 +1883,29 @@ def f(): df.loc['bar'] *= 2 self.assertRaises(TypeError, f) + # from SO + #http://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation + df_orig = DataFrame.from_dict({'price': { + ('DE', 'Coal', 'Stock'): 2, + ('DE', 'Gas', 'Stock'): 4, + ('DE', 'Elec', 'Demand'): 1, + ('FR', 'Gas', 'Stock'): 5, + ('FR', 'Solar', 'SupIm'): 0, + ('FR', 'Wind', 'SupIm'): 0}}) + df_orig.index = MultiIndex.from_tuples(df_orig.index, names=['Sit', 'Com', 'Type']) + + expected = df_orig.copy() + expected.iloc[[0,2,3]] *= 2 + + idx = pd.IndexSlice + df = df_orig.copy() + df.loc[idx[:,:,'Stock'],:] *= 2 + assert_frame_equal(df, expected) + + df = df_orig.copy() + df.loc[idx[:,:,'Stock'],'price'] *= 2 + assert_frame_equal(df, expected) + def test_getitem_multiindex(self): # GH 5725
https://api.github.com/repos/pandas-dev/pandas/pulls/7667
2014-07-04T15:41:25Z
2014-07-04T16:10:17Z
2014-07-04T16:10:16Z
2014-07-04T16:10:17Z
TST/CLN: Refactor io.data.options class to improve testing
diff --git a/pandas/io/data.py b/pandas/io/data.py index 67a841a27f992..13ced745b7b3f 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -661,31 +661,35 @@ def get_options_data(self, month=None, year=None, expiry=None): _OPTIONS_BASE_URL = 'http://finance.yahoo.com/q/op?s={sym}' - def _get_option_tables(self, month, year, expiry): + def _get_option_tables(self, expiry): + root = self._get_option_page_from_yahoo(expiry) + tables = self._parse_option_page_from_yahoo(root) + m1 = _two_char_month(expiry.month) + table_name = '_tables' + m1 + str(expiry.year)[-2:] + setattr(self, table_name, tables) + return tables - year, month, expiry = self._try_parse_dates(year, month, expiry) + def _get_option_page_from_yahoo(self, expiry): url = self._OPTIONS_BASE_URL.format(sym=self.symbol) - if month and year: # try to get specified month from yahoo finance - m1 = _two_char_month(month) + m1 = _two_char_month(expiry.month) - # if this month use other url - if month == CUR_MONTH and year == CUR_YEAR: - url += '+Options' - else: - url += '&m={year}-{m1}'.format(year=year, m1=m1) - else: # Default to current month + # if this month use other url + if expiry.month == CUR_MONTH and expiry.year == CUR_YEAR: url += '+Options' + else: + url += '&m={year}-{m1}'.format(year=expiry.year, m1=m1) root = self._parse_url(url) + return root + + def _parse_option_page_from_yahoo(self, root): + tables = root.xpath('.//table') ntables = len(tables) if ntables == 0: - raise RemoteDataError("No tables found at {0!r}".format(url)) - - table_name = '_tables' + m1 + str(year)[-2:] - setattr(self, table_name, tables) + raise RemoteDataError("No tables found") try: self.underlying_price, self.quote_time = self._get_underlying_price(root) @@ -723,7 +727,7 @@ def _get_option_data(self, month, year, expiry, name): try: tables = getattr(self, table_name) except AttributeError: - tables = self._get_option_tables(month, year, expiry) + tables = self._get_option_tables(expiry) ntables = len(tables) 
table_loc = self._TABLE_LOC[name] @@ -903,13 +907,14 @@ def get_near_stock_price(self, above_below=2, call=True, put=False, meth_name = 'get_{0}_data'.format(nam[:-1]) df = getattr(self, meth_name)(expiry=expiry) - start_index = np.where(df.index.get_level_values('Strike') + if self.underlying_price: + start_index = np.where(df.index.get_level_values('Strike') > self.underlying_price)[0][0] - get_range = slice(start_index - above_below, + get_range = slice(start_index - above_below, start_index + above_below + 1) - chop = df[get_range].dropna(how='all') - data[nam] = chop + chop = df[get_range].dropna(how='all') + data[nam] = chop return concat([data[nam] for nam in to_ret]).sortlevel() @@ -948,6 +953,8 @@ def _try_parse_dates(year, month, expiry): year = CUR_YEAR month = CUR_MONTH expiry = dt.date(year, month, 1) + else: + expiry = dt.date(year, month, 1) return year, month, expiry @@ -1127,7 +1134,11 @@ def _get_expiry_months(self): url = 'http://finance.yahoo.com/q/op?s={sym}'.format(sym=self.symbol) root = self._parse_url(url) - links = root.xpath('.//*[@id="yfncsumtab"]')[0].xpath('.//a') + try: + links = root.xpath('.//*[@id="yfncsumtab"]')[0].xpath('.//a') + except IndexError: + return RemoteDataError('Expiry months not available') + month_gen = (element.attrib['href'].split('=')[-1] for element in links if '/q/op?s=' in element.attrib['href'] diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index 5d2a8ef08c95b..8b5a81f050ced 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -334,7 +334,7 @@ def test_sample_page_price_quote_time2(self): @network def test_sample_page_chg_float(self): #Tests that numeric columns with comma's are appropriately dealt with - tables = self.root1.xpath('.//table') + tables = self.aapl._parse_option_page_from_yahoo(self.root1) data = web._parse_options_data(tables[self.aapl._TABLE_LOC['puts']]) option_data = self.aapl._process_data(data) self.assertEqual(option_data['Chg'].dtype, 
'float64')
Removed most references to month and year (passing expiry date around instead). Also protected with RemoteDataError checking for the expiry month links. Fixes #7648
https://api.github.com/repos/pandas-dev/pandas/pulls/7665
2014-07-04T04:58:46Z
2014-07-07T11:09:41Z
2014-07-07T11:09:41Z
2014-07-07T11:09:46Z
API: disallow inplace setting with where and a non-np.nan value (GH7656)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 850e7e13db2ff..8ede5f32dded6 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -24,11 +24,6 @@ users upgrade to this version. API changes ~~~~~~~~~~~ - - - - - - All ``offsets`` suppports ``normalize`` keyword to specify whether ``offsets.apply``, ``rollforward`` and ``rollback`` resets time (hour, minute, etc) or not (default ``False``, preserves time) (:issue:`7156`) @@ -60,6 +55,8 @@ API changes - Bug in ``.loc`` performing fallback integer indexing with ``object`` dtype indices (:issue:`7496`) - Add back ``#N/A N/A`` as a default NA value in text parsing, (regresion from 0.12) (:issue:`5521`) +- Raise a ``TypeError`` on inplace-setting with a ``.where`` and a non ``np.nan`` value as this is inconsistent + with a set-item expression like ``df[mask] = None`` (:issue:`7656`) .. _whatsnew_0141.prior_deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 049d3b6a8578c..da9fb44f80b09 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -679,8 +679,8 @@ def to_gbq(self, destination_table, project_id=None, chunksize=10000, the defined table schema and column types. For simplicity, this method uses the Google BigQuery streaming API. The to_gbq method chunks data into a default chunk size of 10,000. Failures return the complete error - response which can be quite long depending on the size of the insert. - There are several important limitations of the Google streaming API + response which can be quite long depending on the size of the insert. + There are several important limitations of the Google streaming API which are detailed at: https://developers.google.com/bigquery/streaming-data-into-bigquery. 
@@ -1925,11 +1925,7 @@ def _setitem_frame(self, key, value): if key.values.dtype != np.bool_: raise TypeError('Must pass DataFrame with boolean values only') - if self._is_mixed_type: - if not self._is_numeric_mixed_type: - raise TypeError( - 'Cannot do boolean setting on mixed-type frame') - + self._check_inplace_setting(value) self._check_setitem_copy() self.where(-key, value, inplace=True) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 756de479a471a..c88aced3de8a2 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1910,6 +1910,24 @@ def _is_datelike_mixed_type(self): f = lambda: self._data.is_datelike_mixed_type return self._protect_consolidate(f) + def _check_inplace_setting(self, value): + """ check whether we allow in-place setting with this type of value """ + + if self._is_mixed_type: + if not self._is_numeric_mixed_type: + + # allow an actual np.nan thru + try: + if np.isnan(value): + return True + except: + pass + + raise TypeError( + 'Cannot do inplace boolean setting on mixed-types with a non np.nan value') + + return True + def _protect_consolidate(self, f): blocks_before = len(self._data.blocks) result = f() @@ -3214,6 +3232,8 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager + + self._check_inplace_setting(other) new_data = self._data.putmask(mask=cond, new=other, align=axis is None, inplace=True) self._update_inplace(new_data) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 7368fcf8dac26..d7f8d235d4229 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -9242,6 +9242,12 @@ def test_where_none(self): expected = DataFrame({'series': Series([0,1,2,3,4,5,6,7,np.nan,np.nan]) }) assert_frame_equal(df, expected) + # GH 7656 + df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {'A': np.nan, 'B': 'Test', 'C': np.nan}]) + expected = 
df.where(~isnull(df), None) + with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'): + df.where(~isnull(df), None, inplace=True) + def test_where_align(self): def create():
closes #7656
https://api.github.com/repos/pandas-dev/pandas/pulls/7657
2014-07-03T15:40:07Z
2014-07-03T16:26:12Z
2014-07-03T16:26:12Z
2023-05-18T15:27:18Z
PERF: fix perf issue in tz conversions w/o affecting DST transitions
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 09ff6578160f8..441a5e8a99c78 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -599,7 +599,7 @@ def _period_group(freqstr): def _period_str_to_code(freqstr): # hack freqstr = _rule_aliases.get(freqstr, freqstr) - + if freqstr not in _dont_uppercase: freqstr = _rule_aliases.get(freqstr.lower(), freqstr) @@ -659,6 +659,25 @@ def infer_freq(index, warn=True): _ONE_HOUR = 60 * _ONE_MINUTE _ONE_DAY = 24 * _ONE_HOUR +def _tz_convert_with_transitions(values, to_tz, from_tz): + """ + convert i8 values from the specificed timezone to the to_tz zone, taking + into account DST transitions + """ + + # vectorization is slow, so tests if we can do this via the faster tz_convert + f = lambda x: tslib.tz_convert_single(x, to_tz, from_tz) + + if len(values) > 2: + first_slow, last_slow = f(values[0]),f(values[-1]) + + first_fast, last_fast = tslib.tz_convert(np.array([values[0],values[-1]],dtype='i8'),to_tz,from_tz) + + # don't cross a DST, so ok + if first_fast == first_slow and last_fast == last_slow: + return tslib.tz_convert(values,to_tz,from_tz) + + return np.vectorize(f)(values) class _FrequencyInferer(object): """ @@ -670,10 +689,7 @@ def __init__(self, index, warn=True): self.values = np.asarray(index).view('i8') if index.tz is not None: - f = lambda x: tslib.tz_convert_single(x, 'UTC', index.tz) - self.values = np.vectorize(f)(self.values) - # This cant work, because of DST - # self.values = tslib.tz_convert(self.values, 'UTC', index.tz) + self.values = _tz_convert_with_transitions(self.values,'UTC',index.tz) self.warn = warn diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 9473b10876600..d022911fe2909 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -14,7 +14,7 @@ from pandas.compat import u from pandas.tseries.frequencies import ( infer_freq, to_offset, get_period_alias, - Resolution, get_reso_string) + Resolution, 
get_reso_string, _tz_convert_with_transitions) from pandas.core.base import DatetimeIndexOpsMixin from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay from pandas.tseries.tools import parse_time_string, normalize_date @@ -1376,7 +1376,10 @@ def __getitem__(self, key): else: if com._is_bool_indexer(key): key = np.asarray(key) - key = lib.maybe_booleans_to_slice(key.view(np.uint8)) + if key.all(): + key = slice(0,None,None) + else: + key = lib.maybe_booleans_to_slice(key.view(np.uint8)) new_offset = None if isinstance(key, slice): @@ -1588,9 +1591,7 @@ def insert(self, loc, item): new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)], self[loc:].asi8)) if self.tz is not None: - f = lambda x: tslib.tz_convert_single(x, 'UTC', self.tz) - new_dates = np.vectorize(f)(new_dates) - # new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz) + new_dates = _tz_convert_with_transitions(new_dates,'UTC',self.tz) return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz) except (AttributeError, TypeError):
``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- datetimeindex_normalize | 3.3297 | 83.0923 | 0.0401 | Ratio < 1.0 means the target commit is faster than the baseline. Seed used: 1234 Target [fc88541] : PERF: allow slice indexers to be computed faster Base [160419e] : TST: fixes for 2.6 comparisons ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7652
2014-07-02T18:02:53Z
2014-07-02T18:38:23Z
2014-07-02T18:38:23Z
2014-07-02T18:38:23Z
REGR: Add back #N/A N/A as a default NA value (regression from 0.12) (GH5521)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 42041cceeb81b..9392fd299b674 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -59,6 +59,7 @@ API changes object isn't a ``Period`` ``False`` is returned. (:issue:`7376`) - Bug in ``.loc`` performing fallback integer indexing with ``object`` dtype indices (:issue:`7496`) +- Add back ``#N/A N/A`` as a default NA value in text parsing, (regresion from 0.12) (:issue:`5521`) .. _whatsnew_0141.prior_deprecations: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 3f7a8ce9b2788..0dcbdb86b9069 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -487,7 +487,7 @@ def read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds): # no longer excluding inf representations # '1.#INF','-1.#INF', '1.#INF000000', _NA_VALUES = set([ - '-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A', 'N/A', 'NA', '#NA', + '-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN', '-NaN', 'nan', '-nan', '' ]) diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index b9c7621c19ab0..ab9a6f58119c2 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -706,7 +706,7 @@ def test_non_string_na_values(self): def test_default_na_values(self): _NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A','N/A', 'NA', '#NA', 'NULL', 'NaN', - 'nan', '-NaN', '-nan', '']) + 'nan', '-NaN', '-nan', '#N/A N/A','']) assert_array_equal (_NA_VALUES, parsers._NA_VALUES) nv = len(_NA_VALUES) def f(i, v):
closes #5521
https://api.github.com/repos/pandas-dev/pandas/pulls/7639
2014-07-01T15:57:47Z
2014-07-01T16:33:07Z
2014-07-01T16:33:07Z
2014-07-01T16:33:07Z
BUG: Bug in Series.get with a boolean accessor (GH7407)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 42041cceeb81b..5a731ae3dcacf 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -173,7 +173,7 @@ Bug Fixes - Bug in timeops with non-aligned Series (:issue:`7500`) - Bug in timedelta inference when assigning an incomplete Series (:issue:`7592`) - Bug in groupby ``.nth`` with a Series and integer-like column name (:issue:`7559`) - +- Bug in ``Series.get`` with a boolean accessor (:issue:`7407`) - Bug in ``value_counts`` where ``NaT`` did not qualify as missing (``NaN``) (:issue:`7423`) - Bug in ``to_timedelta`` that accepted invalid units and misinterpreted 'm/h' (:issue:`7611`, :issue: `6423`) diff --git a/pandas/core/index.py b/pandas/core/index.py index 4d7e14c9e026f..525d17c7612a7 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1191,7 +1191,7 @@ def get_value(self, series, key): try: return self._engine.get_value(s, k) except KeyError as e1: - if len(self) > 0 and self.inferred_type == 'integer': + if len(self) > 0 and self.inferred_type in ['integer','boolean']: raise try: diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 093954f1d8c1d..f4f8495b1dafd 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -123,6 +123,20 @@ def test_get(self): expected = 43 self.assertEqual(result,expected) + # GH 7407 + # with a boolean accessor + df = pd.DataFrame({'i':[0]*3, 'b':[False]*3}) + vc = df.i.value_counts() + result = vc.get(99,default='Missing') + self.assertEquals(result,'Missing') + + vc = df.b.value_counts() + result = vc.get(False,default='Missing') + self.assertEquals(result,3) + + result = vc.get(True,default='Missing') + self.assertEquals(result,'Missing') + def test_delitem(self): # GH 5542
closes #7407
https://api.github.com/repos/pandas-dev/pandas/pulls/7638
2014-07-01T15:39:11Z
2014-07-01T16:02:12Z
2014-07-01T16:02:12Z
2014-07-01T16:02:12Z
Backport PR #27767 on branch 0.25.x (BUG: Fix windowing over read-only arrays)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 943a6adb7944e..66b760a76dad3 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -118,6 +118,7 @@ Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`) +- Bug in windowing over read-only arrays (:issue:`27766`) - - diff --git a/pandas/core/window.py b/pandas/core/window.py index 86574208a3fc0..30aa2f2cea8dc 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -235,8 +235,10 @@ def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray: "cannot handle this type -> {0}" "".format(values.dtype) ) - # Always convert inf to nan - values[np.isinf(values)] = np.NaN + # Convert inf to nan for C funcs + inf = np.isinf(values) + if inf.any(): + values = np.where(inf, np.nan, values) return values diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index c7177e1d3914f..f0787ab3d191f 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -326,3 +326,11 @@ def test_rolling_axis_count(self, axis_frame): result = df.rolling(2, axis=axis_frame).count() tm.assert_frame_equal(result, expected) + + def test_readonly_array(self): + # GH-27766 + arr = np.array([1, 3, np.nan, 3, 5]) + arr.setflags(write=False) + result = pd.Series(arr).rolling(2).mean() + expected = pd.Series([np.nan, 2, np.nan, np.nan, 4]) + tm.assert_series_equal(result, expected)
Backport PR #27767: BUG: Fix windowing over read-only arrays
https://api.github.com/repos/pandas-dev/pandas/pulls/27782
2019-08-06T15:39:47Z
2019-08-06T20:52:55Z
2019-08-06T20:52:55Z
2019-08-06T20:52:56Z
Avoid calling S3File.s3
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 943a6adb7944e..7d78a8fe6dd84 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -103,7 +103,7 @@ MultiIndex I/O ^^^ -- +- Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`) - - diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 82c460300582b..6fc70e9f4a737 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -184,12 +184,14 @@ def write( def read(self, path, columns=None, **kwargs): if is_s3_url(path): + from pandas.io.s3 import get_file_and_filesystem + # When path is s3:// an S3File is returned. # We need to retain the original path(str) while also # pass the S3File().open function to fsatparquet impl. - s3, _, _, should_close = get_filepath_or_buffer(path) + s3, filesystem = get_file_and_filesystem(path) try: - parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open) + parquet_file = self.api.ParquetFile(path, open_with=filesystem.open) finally: s3.close() else: diff --git a/pandas/io/s3.py b/pandas/io/s3.py index 0a7c082fec51c..7e0a37e8cba20 100644 --- a/pandas/io/s3.py +++ b/pandas/io/s3.py @@ -1,8 +1,11 @@ """ s3 support for remote file interactivity """ +from typing import IO, Any, Optional, Tuple from urllib.parse import urlparse as parse_url from pandas.compat._optional import import_optional_dependency +from pandas._typing import FilePathOrBuffer + s3fs = import_optional_dependency( "s3fs", extra="The s3fs package is required to handle s3 files." 
) @@ -14,9 +17,9 @@ def _strip_schema(url): return result.netloc + result.path -def get_filepath_or_buffer( - filepath_or_buffer, encoding=None, compression=None, mode=None -): +def get_file_and_filesystem( + filepath_or_buffer: FilePathOrBuffer, mode: Optional[str] = None +) -> Tuple[IO, Any]: from botocore.exceptions import NoCredentialsError if mode is None: @@ -24,7 +27,7 @@ def get_filepath_or_buffer( fs = s3fs.S3FileSystem(anon=False) try: - filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) + file = fs.open(_strip_schema(filepath_or_buffer), mode) except (FileNotFoundError, NoCredentialsError): # boto3 has troubles when trying to access a public file # when credentialed... @@ -33,5 +36,15 @@ def get_filepath_or_buffer( # A NoCredentialsError is raised if you don't have creds # for that bucket. fs = s3fs.S3FileSystem(anon=True) - filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) - return filepath_or_buffer, None, compression, True + file = fs.open(_strip_schema(filepath_or_buffer), mode) + return file, fs + + +def get_filepath_or_buffer( + filepath_or_buffer: FilePathOrBuffer, + encoding: Optional[str] = None, + compression: Optional[str] = None, + mode: Optional[str] = None, +) -> Tuple[IO, Optional[str], Optional[str], bool]: + file, _fs = get_file_and_filesystem(filepath_or_buffer, mode=mode) + return file, None, compression, True
When reading from S3 using fastparquet, the `S3File.s3` attribute was accessed; this attribute was removed in s3fs 0.3.0. This change avoids accessing it by using a new method `get_file_and_filesystem` which returns the filesystem in addition to the file. - [x] closes #27756 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27777
2019-08-06T12:48:03Z
2019-08-12T19:10:50Z
2019-08-12T19:10:50Z
2019-08-12T19:10:55Z
DEPR: Removed the previously deprecated ExtensionArray._formatting_values
diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst index 407aab4bb1f1b..78e8734e9b5ff 100644 --- a/doc/source/reference/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -34,7 +34,6 @@ objects. api.extensions.ExtensionArray._concat_same_type api.extensions.ExtensionArray._formatter - api.extensions.ExtensionArray._formatting_values api.extensions.ExtensionArray._from_factorized api.extensions.ExtensionArray._from_sequence api.extensions.ExtensionArray._from_sequence_of_strings diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index c7f8bb70e3461..bca7bf8cbefbd 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -65,7 +65,7 @@ Removal of prior version deprecations/changes - Changed the the default value of `inplace` in :meth:`DataFrame.set_index` and :meth:`Series.set_axis`. It now defaults to False (:issue:`27600`) - :meth:`pandas.Series.str.cat` now defaults to aligning ``others``, using ``join='left'`` (:issue:`27611`) - :meth:`pandas.Series.str.cat` does not accept list-likes *within* list-likes anymore (:issue:`27611`) -- +- Removed the previously deprecated :meth:`ExtensionArray._formatting_values`. Use :attr:`ExtensionArray._formatter` instead. (:issue:`23601`) .. _whatsnew_1000.performance: diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index e517be4f03a16..00e1d092ffa22 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -66,7 +66,6 @@ class ExtensionArray: unique _concat_same_type _formatter - _formatting_values _from_factorized _from_sequence _from_sequence_of_strings @@ -908,21 +907,6 @@ def _formatter(self, boxed: bool = False) -> Callable[[Any], Optional[str]]: return str return repr - def _formatting_values(self) -> np.ndarray: - # At the moment, this has to be an array since we use result.dtype - """ - An array of values to be printed in, e.g. the Series repr - - .. 
deprecated:: 0.24.0 - - Use :meth:`ExtensionArray._formatter` instead. - - Returns - ------- - array : ndarray - """ - return np.array(self) - # ------------------------------------------------------------------------ # Reshaping # ------------------------------------------------------------------------ diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 9f3aa699cfaf4..12dcabdb0f680 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -68,13 +68,7 @@ ) import pandas.core.algorithms as algos -from pandas.core.arrays import ( - Categorical, - DatetimeArray, - ExtensionArray, - PandasDtype, - TimedeltaArray, -) +from pandas.core.arrays import Categorical, DatetimeArray, PandasDtype, TimedeltaArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction import extract_array @@ -209,10 +203,6 @@ def internal_values(self, dtype=None): """ return self.values - def formatting_values(self): - """Return the internal values used by the DataFrame/SeriesFormatter""" - return self.internal_values() - def get_values(self, dtype=None): """ return an internal format, currently just the ndarray @@ -1867,21 +1857,6 @@ def _slice(self, slicer): return self.values[slicer] - def formatting_values(self): - # Deprecating the ability to override _formatting_values. - # Do the warning here, it's only user in pandas, since we - # have to check if the subclass overrode it. - fv = getattr(type(self.values), "_formatting_values", None) - if fv and fv != ExtensionArray._formatting_values: - msg = ( - "'ExtensionArray._formatting_values' is deprecated. " - "Specify 'ExtensionArray._formatter' instead." - ) - warnings.warn(msg, FutureWarning, stacklevel=10) - return self.values._formatting_values() - - return self.values - def concat_same_type(self, to_concat, placement=None): """ Concatenate list of single blocks of the same type. 
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index b30ddbc383906..1c31542daa5de 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1582,10 +1582,6 @@ def external_values(self): def internal_values(self): return self._block.internal_values() - def formatting_values(self): - """Return the internal values used by the DataFrame/SeriesFormatter""" - return self._block.formatting_values() - def get_values(self): """ return a dense type view """ return np.array(self._block.to_dense(), copy=False) diff --git a/pandas/core/series.py b/pandas/core/series.py index 9e317d365ccb8..4e64a25e430eb 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -562,13 +562,6 @@ def _values(self): """ return self._data.internal_values() - def _formatting_values(self): - """ - Return the values that can be formatted (used by SeriesFormatter - and DataFrameFormatter). - """ - return self._data.formatting_values() - def get_values(self): """ Same as values (but handles sparseness conversions); is a view. 
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index d8a370d77ea31..61af935bd8227 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -336,9 +336,11 @@ def _get_formatted_index(self) -> Tuple[List[str], bool]: return fmt_index, have_header def _get_formatted_values(self) -> List[str]: - values_to_format = self.tr_series._formatting_values() return format_array( - values_to_format, None, float_format=self.float_format, na_rep=self.na_rep + self.tr_series._values, + None, + float_format=self.float_format, + na_rep=self.na_rep, ) def to_string(self) -> str: @@ -903,9 +905,8 @@ def to_latex( def _format_col(self, i: int) -> List[str]: frame = self.tr_frame formatter = self._get_formatter(i) - values_to_format = frame.iloc[:, i]._formatting_values() return format_array( - values_to_format, + frame.iloc[:, i]._values, formatter, float_format=self.float_format, na_rep=self.na_rep, diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 9dec023f4073a..3ac9d37ccf4f3 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -392,17 +392,6 @@ def test_ufunc_fallback(data): tm.assert_series_equal(result, expected) -def test_formatting_values_deprecated(): - class DecimalArray2(DecimalArray): - def _formatting_values(self): - return np.array(self) - - ser = pd.Series(DecimalArray2([decimal.Decimal("1.0")])) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - repr(ser) - - def test_array_ufunc(): a = to_decimal([1, 2, 3]) result = np.exp(a) diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py index 1a4f84e2c0fd2..6311070cfe2bb 100644 --- a/pandas/tests/extension/test_external_block.py +++ b/pandas/tests/extension/test_external_block.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -from pandas.core.internals import BlockManager, 
SingleBlockManager +from pandas.core.internals import BlockManager from pandas.core.internals.blocks import Block, NonConsolidatableMixIn @@ -10,9 +10,6 @@ class CustomBlock(NonConsolidatableMixIn, Block): _holder = np.ndarray - def formatting_values(self): - return np.array(["Val: {}".format(i) for i in self.values]) - def concat_same_type(self, to_concat, placement=None): """ Always concatenate disregarding self.ndim as the values are @@ -35,22 +32,6 @@ def df(): return pd.DataFrame(block_manager) -def test_custom_repr(): - values = np.arange(3, dtype="int64") - - # series - block = CustomBlock(values, placement=slice(0, 3)) - - s = pd.Series(SingleBlockManager(block, pd.RangeIndex(3))) - assert repr(s) == "0 Val: 0\n1 Val: 1\n2 Val: 2\ndtype: int64" - - # dataframe - block = CustomBlock(values, placement=slice(0, 1)) - blk_mgr = BlockManager([block], [["col"], range(3)]) - df = pd.DataFrame(blk_mgr) - assert repr(df) == " col\n0 Val: 0\n1 Val: 1\n2 Val: 2" - - def test_concat_series(): # GH17728 values = np.arange(3, dtype="int64")
and revert #17143, see https://github.com/pandas-dev/pandas/pull/17143#issuecomment-505080265
https://api.github.com/repos/pandas-dev/pandas/pulls/27774
2019-08-06T10:31:33Z
2019-08-07T06:23:26Z
2019-08-07T06:23:26Z
2019-08-08T14:20:03Z
BUG: _can_use_numexpr fails when passed large Series
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index dfa216b1db56e..21f8f33e2b439 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -54,7 +54,7 @@ Numeric ^^^^^^^ - Bug in :meth:`Series.interpolate` when using a timezone aware :class:`DatetimeIndex` (:issue:`27548`) - Bug when printing negative floating point complex numbers would raise an ``IndexError`` (:issue:`27484`) -- +- Bug where :class:`DataFrame` arithmetic operators such as :meth:`DataFrame.mul` with a :class:`Series` with axis=1 would raise an ``AttributeError`` on :class:`DataFrame` larger than the minimum threshold to invoke numexpr (:issue:`27636`) - Conversion diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index d9dc194d484ae..1959242a88897 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -76,16 +76,17 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): # required min elements (otherwise we are adding overhead) if np.prod(a.shape) > _MIN_ELEMENTS: - # check for dtype compatibility dtypes = set() for o in [a, b]: - if hasattr(o, "dtypes"): + # Series implements dtypes, check for dimension count as well + if hasattr(o, "dtypes") and o.ndim > 1: s = o.dtypes.value_counts() if len(s) > 1: return False dtypes |= set(s.index.astype(str)) - elif isinstance(o, np.ndarray): + # ndarray and Series Case + elif hasattr(o, "dtype"): dtypes |= {o.dtype.name} # allowed are a superset diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 4070624985068..ca514f62f451d 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -66,7 +66,7 @@ def run_arithmetic(self, df, other, assert_func, check_dtype=False, test_flex=Tr operator_name = "truediv" if test_flex: - op = lambda x, y: getattr(df, arith)(y) + op = lambda x, y: getattr(x, arith)(y) op.__name__ = arith else: op = getattr(operator, 
operator_name) @@ -318,7 +318,6 @@ def testit(): for f in [self.frame, self.frame2, self.mixed, self.mixed2]: for cond in [True, False]: - c = np.empty(f.shape, dtype=np.bool_) c.fill(cond) result = expr.where(c, f.values, f.values + 1) @@ -431,3 +430,29 @@ def test_bool_ops_column_name_dtype(self, test_input, expected): # GH 22383 - .ne fails if columns containing column name 'dtype' result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]]) assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv") + ) + @pytest.mark.parametrize("axis", (0, 1)) + def test_frame_series_axis(self, axis, arith): + # GH#26736 Dataframe.floordiv(Series, axis=1) fails + if axis == 1 and arith == "floordiv": + pytest.xfail("'floordiv' does not succeed with axis=1 #27636") + + df = self.frame + if axis == 1: + other = self.frame.iloc[0, :] + else: + other = self.frame.iloc[:, 0] + + expr._MIN_ELEMENTS = 0 + + op_func = getattr(df, arith) + + expr.set_use_numexpr(False) + expected = op_func(other, axis=axis) + expr.set_use_numexpr(True) + + result = op_func(other, axis=axis) + assert_frame_equal(expected, result)
This fixes a regression introduced in #27145 where _can_use_numexpr would fail if passed a Series and not a DataFrame. I decided not to use run_arithmetic in the test suite because there is a separate issue when running floordiv that is out of the scope of the fix. I will open a separate issue about improving the test coverage of "test_expressions.py" as it currently does not check any of the arguments you can pass to the operators, such as 'axis', 'level' and 'fill_value'. - [x] closes #27636 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27773
2019-08-06T10:27:57Z
2019-08-19T17:20:24Z
2019-08-19T17:20:24Z
2019-08-19T17:20:29Z
[BLD] Add script that fails build if git tags do not exist
diff --git a/.travis.yml b/.travis.yml index 9be4291d10874..79fecc41bec0d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,7 +21,7 @@ env: git: # for cloning - depth: 2000 + depth: false matrix: fast_finish: true @@ -63,7 +63,7 @@ before_install: - pwd - uname -a - git --version - - git tag + - ./ci/check_git_tags.sh # Because travis runs on Google Cloud and has a /etc/boto.cfg, # it breaks moto import, see: # https://github.com/spulec/moto/issues/1771 diff --git a/ci/check_git_tags.sh b/ci/check_git_tags.sh new file mode 100755 index 0000000000000..9dbcd4f98683e --- /dev/null +++ b/ci/check_git_tags.sh @@ -0,0 +1,28 @@ +set -e + +if [[ ! $(git tag) ]]; then + echo "No git tags in clone, please sync your git tags with upstream using:" + echo " git fetch --tags upstream" + echo " git push --tags origin" + echo "" + echo "If the issue persists, the clone depth needs to be increased in .travis.yml" + exit 1 +fi + +# This will error if there are no tags and we omit --always +DESCRIPTION=$(git describe --long --tags) +echo "$DESCRIPTION" + +if [[ "$DESCRIPTION" == *"untagged"* ]]; then + echo "Unable to determine most recent tag, aborting build" + exit 1 +else + if [[ "$DESCRIPTION" != *"g"* ]]; then + # A good description will have the hash prefixed by g, a bad one will be + # just the hash + echo "Unable to determine most recent tag, aborting build" + exit 1 + else + echo "$(git tag)" + fi +fi diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 479e55c86fcd1..65b2dab1b02a8 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,4 +1,5 @@ import collections +from distutils.version import LooseVersion from functools import partial import string @@ -117,3 +118,13 @@ def test_git_version(): git_version = pd.__git_version__ assert len(git_version) == 40 assert all(c in string.hexdigits for c in git_version) + + +def test_version_tag(): + version = pd.__version__ + try: + version > LooseVersion("0.0.1") + except TypeError: 
+ raise ValueError( + "No git tags exist, please sync tags between upstream and your repo" + )
The Travis build process can fail in hard to decipher ways if git tags are not synced between `upstream` and a developer's repo. This failure mode would occur in the main repo if we go 2000 commits (current clone depth, ~9 months) without tagging a release. This PR explicitly fails the build with instructions of how to resolve if this situation is encountered. The underlying issue is that `versioneer` and the underlying `git describe` rely on tags to exist in the local repo for the purposes of creating a version description like: ``` v0.25.0-116-gee54d95952 ``` As Travis makes a shallow clone, these tags will not exist if: - A developer has not explicitly synced tags between repos (the default git behavior) - `pandas` goes 2000 commits without tagging a commit If tags do not exist, we get something like: ``` 0+untagged.2000.g66ada8c ``` Which causes tests involving downstream libraries like `pyarrow`, `statsmodels`, etc to fail their internal version checks in sometimes indecipherable ways like this: ``` =================================== FAILURES =================================== ____________________________ TestFeather.test_error ____________________________ [gw0] linux -- Python 3.7.3 /home/travis/miniconda3/envs/pandas-dev/bin/python self = <pandas.tests.io.test_feather.TestFeather object at 0x7f353ab95c18> def test_error(self): for obj in [ pd.Series([1, 2, 3]), 1, "foo", pd.Timestamp("20130101"), np.array([1, 2, 3]), ]: > self.check_error_on_write(obj, ValueError) pandas/tests/io/test_feather.py:49: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ pandas/tests/io/test_feather.py:27: in check_error_on_write to_feather(df, path) pandas/io/feather_format.py:24: in to_feather from pyarrow import feather _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ from distutils.version import LooseVersion import os import six import pandas as pd import warnings > from pyarrow.compat import pdapi E
ImportError: cannot import name 'pdapi' from 'pyarrow.compat' (/home/travis/miniconda3/envs/pandas-dev/lib/python3.7/site-packages/pyarrow/compat.py) ``` With some digging, you can eventually find that the root cause is: ``` self = LooseVersion ('0+untagged.2000.g66ada8c'), other = LooseVersion ('0.21') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) if self.version == other.version: return 0 > if self.version < other.version: E TypeError: '<' not supported between instances of 'str' and 'int' ``` Syncing tags between repos resolves the issue. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27770
2019-08-06T03:53:13Z
2019-08-07T13:27:04Z
2019-08-07T13:27:04Z
2019-08-07T13:27:04Z
REF: Make CategoricalIndex comparison defer to Categorical comparison
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index d22b4bd4d3f2b..984f1835bd078 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -89,6 +89,9 @@ def f(self, other): return NotImplemented other = lib.item_from_zerodim(other) + if is_list_like(other) and len(other) != len(self): + # TODO: Could this fail if the categories are listlike objects? + raise ValueError("Lengths must match.") if not self.ordered: if op in ["__lt__", "__gt__", "__le__", "__ge__"]: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 356ae20b2240a..bd998656914c6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -48,6 +48,7 @@ ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( + ABCCategorical, ABCDataFrame, ABCDateOffset, ABCDatetimeArray, @@ -99,11 +100,14 @@ def _make_comparison_op(op, cls): def cmp_method(self, other): - if isinstance(other, (np.ndarray, Index, ABCSeries)): + if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)): if other.ndim > 0 and len(self) != len(other): raise ValueError("Lengths must match to compare") - if is_object_dtype(self) and not isinstance(self, ABCMultiIndex): + if is_object_dtype(self) and isinstance(other, ABCCategorical): + left = type(other)(self._values, dtype=other.dtype) + return op(left, other) + elif is_object_dtype(self) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): result = ops._comp_method_OBJECT_ARRAY(op, self.values, other) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 0f6aa711adc90..8bfa7e8d20b4f 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -899,31 +899,12 @@ def _make_compare(op): opname = "__{op}__".format(op=op.__name__) def _evaluate_compare(self, other): - - # if we have a Categorical type, then must have the same - # 
categories - if isinstance(other, CategoricalIndex): - other = other._values - elif isinstance(other, Index): - other = self._create_categorical(other._values, dtype=self.dtype) - - if isinstance(other, (ABCCategorical, np.ndarray, ABCSeries)): - if len(self.values) != len(other): - raise ValueError("Lengths must match to compare") - - if isinstance(other, ABCCategorical): - if not self.values.is_dtype_equal(other): - raise TypeError( - "categorical index comparisons must " - "have the same categories and ordered " - "attributes" - ) - - result = op(self.values, other) + with np.errstate(all="ignore"): + result = op(self.array, other) if isinstance(result, ABCSeries): # Dispatch to pd.Categorical returned NotImplemented # and we got a Series back; down-cast to ndarray - result = result.values + result = result._values return result return compat.set_function_name(_evaluate_compare, opname, cls) diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 4ab1941e3493f..c78d5c79453ab 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -1038,8 +1038,14 @@ def wrapper(self, other, axis=None): # Defer to DataFrame implementation; fail early return NotImplemented - elif isinstance(other, ABCSeries) and not self._indexed_same(other): + if isinstance(other, ABCSeries) and not self._indexed_same(other): raise ValueError("Can only compare identically-labeled Series objects") + elif ( + is_list_like(other) + and len(other) != len(self) + and not isinstance(other, (set, frozenset)) + ): + raise ValueError("Lengths must match") elif is_categorical_dtype(self): # Dispatch to Categorical implementation; CategoricalIndex diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 280b0a99c7e68..67bf9bd20e716 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -823,6 +823,11 @@ def test_equals_categorical(self): msg = ( "categorical index comparisons must 
have the same categories" " and ordered attributes" + "|" + "Categoricals can only be compared if 'categories' are the same. " + "Categories are different lengths" + "|" + "Categoricals can only be compared if 'ordered' is the same" ) with pytest.raises(TypeError, match=msg): ci1 == ci2
Partially addresses #19513. After this, CategoricalIndex will be defining comparison ops identically to DTA/TDA/PA, could share some code.
https://api.github.com/repos/pandas-dev/pandas/pulls/27769
2019-08-06T02:22:00Z
2019-08-13T12:05:44Z
2019-08-13T12:05:44Z
2019-08-14T21:37:58Z
CLN: short-circuit case in Block.replace
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 9f3aa699cfaf4..8779bf8ca5b54 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -772,8 +772,11 @@ def replace( # If we cannot replace with own dtype, convert to ObjectBlock and # retry if not self._can_hold_element(to_replace): - # TODO: we should be able to infer at this point that there is - # nothing to replace + if not isinstance(to_replace, list): + if inplace: + return [self] + return [self.copy()] + # GH 22083, TypeError or ValueError occurred within error handling # causes infinite loop. Cast and retry only if not objectblock. if is_object_dtype(self): @@ -798,14 +801,27 @@ def replace( filtered_out = ~self.mgr_locs.isin(filter) mask[filtered_out.nonzero()[0]] = False + if not mask.any(): + if inplace: + return [self] + return [self.copy()] + try: blocks = self.putmask(mask, value, inplace=inplace) + # Note: it is _not_ the case that self._can_hold_element(value) + # is always true at this point. In particular, that can fail + # for: + # "2u" with bool-dtype, float-dtype + # 0.5 with int64-dtype + # np.nan with int64-dtype except (TypeError, ValueError): # GH 22083, TypeError or ValueError occurred within error handling # causes infinite loop. Cast and retry only if not objectblock. if is_object_dtype(self): raise + assert not self._can_hold_element(value), value + # try again with a compatible block block = self.astype(object) return block.replace( @@ -960,6 +976,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False) # if we are passed a scalar None, convert it here if not is_list_like(new) and isna(new) and not self.is_object: + # FIXME: make sure we have compatible NA new = self.fill_value if self._can_hold_element(new):
Running out of try/excepts to get rid of
https://api.github.com/repos/pandas-dev/pandas/pulls/27768
2019-08-06T01:23:57Z
2019-08-12T18:58:43Z
2019-08-12T18:58:43Z
2019-08-12T19:04:19Z
BUG: Fix windowing over read-only arrays
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 943a6adb7944e..66b760a76dad3 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -118,6 +118,7 @@ Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`) +- Bug in windowing over read-only arrays (:issue:`27766`) - - diff --git a/pandas/core/window.py b/pandas/core/window.py index a7425bc1466c3..3e3f17369db7b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -246,8 +246,10 @@ def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray: except (ValueError, TypeError): raise TypeError("cannot handle this type -> {0}".format(values.dtype)) - # Always convert inf to nan - values[np.isinf(values)] = np.NaN + # Convert inf to nan for C funcs + inf = np.isinf(values) + if inf.any(): + values = np.where(inf, np.nan, values) return values diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index c7177e1d3914f..f0787ab3d191f 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -326,3 +326,11 @@ def test_rolling_axis_count(self, axis_frame): result = df.rolling(2, axis=axis_frame).count() tm.assert_frame_equal(result, expected) + + def test_readonly_array(self): + # GH-27766 + arr = np.array([1, 3, np.nan, 3, 5]) + arr.setflags(write=False) + result = pd.Series(arr).rolling(2).mean() + expected = pd.Series([np.nan, 2, np.nan, np.nan, 4]) + tm.assert_series_equal(result, expected)
- [x] closes #27766 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27767
2019-08-06T00:42:26Z
2019-08-06T15:39:20Z
2019-08-06T15:39:20Z
2019-08-06T15:39:34Z
TYPING: more type hints for io.formats.printing
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index 4ec9094ce4abe..ead51693da791 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -3,12 +3,14 @@ """ import sys -from typing import Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union from pandas._config import get_option from pandas.core.dtypes.inference import is_sequence +EscapeChars = Union[Dict[str, str], Iterable[str]] + def adjoin(space: int, *lists: List[str], **kwargs) -> str: """ @@ -148,19 +150,16 @@ def _pprint_dict( def pprint_thing( - thing, + thing: Any, _nest_lvl: int = 0, - escape_chars: Optional[Union[Dict[str, str], Iterable[str]]] = None, + escape_chars: Optional[EscapeChars] = None, default_escapes: bool = False, quote_strings: bool = False, max_seq_items: Optional[int] = None, ) -> str: """ This function is the sanctioned way of converting objects - to a unicode representation. - - properly handles nested sequences containing unicode strings - (unicode(object) does not) + to a string representation and properly handles nested sequences. Parameters ---------- @@ -178,21 +177,13 @@ def pprint_thing( Returns ------- - result - unicode str + str """ - def as_escaped_unicode(thing, escape_chars=escape_chars): - # Unicode is fine, else we try to decode using utf-8 and 'replace' - # if that's not it either, we have no way of knowing and the user - # should deal with it himself. 
- - try: - result = str(thing) # we should try this first - except UnicodeDecodeError: - # either utf-8 or we replace errors - result = str(thing).decode("utf-8", "replace") - + def as_escaped_string( + thing: Any, escape_chars: Optional[EscapeChars] = escape_chars + ) -> str: translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"} if isinstance(escape_chars, dict): if default_escapes: @@ -202,10 +193,11 @@ def as_escaped_unicode(thing, escape_chars=escape_chars): escape_chars = list(escape_chars.keys()) else: escape_chars = escape_chars or tuple() + + result = str(thing) for c in escape_chars: result = result.replace(c, translate[c]) - - return str(result) + return result if hasattr(thing, "__next__"): return str(thing) @@ -224,11 +216,11 @@ def as_escaped_unicode(thing, escape_chars=escape_chars): max_seq_items=max_seq_items, ) elif isinstance(thing, str) and quote_strings: - result = "'{thing}'".format(thing=as_escaped_unicode(thing)) + result = "'{thing}'".format(thing=as_escaped_string(thing)) else: - result = as_escaped_unicode(thing) + result = as_escaped_string(thing) - return str(result) # always unicode + return result def pprint_thing_encoded(
xref #27568 ``` $ mypy pandas/io/formats/printing.py --check-untyped-defs --follow-imports skip pandas\io\formats\printing.py:194: error: "str" has no attribute "decode"; maybe "encode"? ```
https://api.github.com/repos/pandas-dev/pandas/pulls/27765
2019-08-05T21:37:03Z
2019-08-23T22:36:59Z
2019-08-23T22:36:59Z
2019-08-24T07:45:54Z
CLN/REF: Remove _try_cast_result, _try_coerce_and_cast_result
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 2ad85903b916b..ea2bd22cccc3d 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -21,7 +21,11 @@ from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, Substitution -from pandas.core.dtypes.cast import maybe_convert_objects, maybe_downcast_to_dtype +from pandas.core.dtypes.cast import ( + maybe_convert_objects, + maybe_downcast_numeric, + maybe_downcast_to_dtype, +) from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, @@ -180,10 +184,8 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1): continue finally: if result is not no_result: - dtype = block.values.dtype - # see if we can cast the block back to the original dtype - result = block._try_coerce_and_cast_result(result, dtype=dtype) + result = maybe_downcast_numeric(result, block.dtype) newb = block.make_block(result) new_items.append(locs) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 676f243c9c8d3..b0c629f017dd3 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -591,6 +591,8 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs): if is_datetime64tz_dtype(orig_values.dtype): result = type(orig_values)(result.astype(np.int64), dtype=orig_values.dtype) + elif is_datetimelike and kind == "aggregate": + result = result.astype(orig_values.dtype) return result, names diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 9f3aa699cfaf4..8c3cf7cc51495 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -18,6 +18,7 @@ find_common_type, infer_dtype_from, infer_dtype_from_scalar, + maybe_downcast_numeric, maybe_downcast_to_dtype, maybe_infer_dtype_type, maybe_promote, @@ -55,7 +56,6 @@ ABCDataFrame, ABCDatetimeIndex, ABCExtensionArray, - ABCIndexClass, ABCPandasArray, ABCSeries, ) @@ 
-685,28 +685,6 @@ def _can_hold_element(self, element): return issubclass(tipo.type, dtype) return isinstance(element, dtype) - def _try_cast_result(self, result, dtype=None): - """ try to cast the result to our original type, we may have - roundtripped thru object in the mean-time - """ - if dtype is None: - dtype = self.dtype - - if self.is_integer or self.is_bool or self.is_datetime: - pass - elif self.is_float and result.dtype == self.dtype: - # protect against a bool/object showing up here - if isinstance(dtype, str) and dtype == "infer": - return result - - # This is only reached via Block.setitem, where dtype is always - # either "infer", self.dtype, or values.dtype. - assert dtype == self.dtype, (dtype, self.dtype) - return result - - # may need to change the dtype here - return maybe_downcast_to_dtype(result, dtype) - def _try_coerce_args(self, other): """ provide coercion to our input arguments """ @@ -729,10 +707,6 @@ def _try_coerce_args(self, other): return other - def _try_coerce_and_cast_result(self, result, dtype=None): - result = self._try_cast_result(result, dtype=dtype) - return result - def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ @@ -925,8 +899,6 @@ def setitem(self, indexer, value): else: values[indexer] = value - # coerce and try to infer the dtypes of the result - values = self._try_coerce_and_cast_result(values, dtype) if transpose: values = values.T block = self.make_block(values) @@ -1444,10 +1416,6 @@ def func(cond, values, other): if transpose: result = result.T - # try to cast if requested - if try_cast: - result = self._try_cast_result(result) - return [self.make_block(result)] # might need to separate out blocks @@ -1459,7 +1427,7 @@ def func(cond, values, other): for m in [mask, ~mask]: if m.any(): taken = result.take(m.nonzero()[0], axis=axis) - r = self._try_cast_result(taken) + r = maybe_downcast_numeric(taken, self.dtype) nb = 
self.make_block(r.T, placement=self.mgr_locs[m]) result_blocks.append(nb) @@ -1692,9 +1660,6 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False) new_values[mask] = new return [self.make_block(values=new_values)] - def _try_cast_result(self, result, dtype=None): - return result - def _get_unstack_items(self, unstacker, new_columns): """ Get the placement, values, and mask for a Block unstack. @@ -1746,7 +1711,8 @@ def __init__(self, values, placement, ndim=None): super().__init__(values, placement, ndim) def _maybe_coerce_values(self, values): - """Unbox to an extension array. + """ + Unbox to an extension array. This will unbox an ExtensionArray stored in an Index or Series. ExtensionArrays pass through. No dtype coercion is done. @@ -1759,9 +1725,7 @@ def _maybe_coerce_values(self, values): ------- ExtensionArray """ - if isinstance(values, (ABCIndexClass, ABCSeries)): - values = values._values - return values + return extract_array(values) @property def _holder(self):
This finishes getting the post-call casting/coersion out of the Blocks. Pre-call coercion is still in there, exclusively for TimedeltaBlock and DatetimeBlock, i.e. will not be necessary if/when those are backed by EA.
https://api.github.com/repos/pandas-dev/pandas/pulls/27764
2019-08-05T21:37:00Z
2019-08-06T15:40:40Z
2019-08-06T15:40:40Z
2019-08-06T16:03:44Z
Backport PR #27733 on branch 0.25.x (BUG: fix to_datetime(dti, utc=True))
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 4d9ee4c676759..943a6adb7944e 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -31,7 +31,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ - +- Bug in :func:`to_datetime` where passing a timezone-naive :class:`DatetimeArray` or :class:`DatetimeIndex` and ``utc=True`` would incorrectly return a timezone-naive result (:issue:`27733`) - - - diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index e9d2c3f07bfae..0c41d8a8050e6 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -334,6 +334,9 @@ def _convert_listlike_datetimes( return DatetimeIndex(arg, tz=tz, name=name) except ValueError: pass + elif tz: + # DatetimeArray, DatetimeIndex + return arg.tz_localize(tz) return arg diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 8db15709da35d..9af0f47f6dce9 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1620,6 +1620,18 @@ def test_dayfirst(self, cache): tm.assert_index_equal(expected, idx5) tm.assert_index_equal(expected, idx6) + @pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray]) + def test_to_datetime_dta_tz(self, klass): + # GH#27733 + dti = date_range("2015-04-05", periods=3).rename("foo") + expected = dti.tz_localize("UTC") + + obj = klass(dti) + expected = klass(expected) + + result = to_datetime(obj, utc=True) + tm.assert_equal(result, expected) + class TestGuessDatetimeFormat: @td.skip_if_not_us_locale
Backport PR #27733: BUG: fix to_datetime(dti, utc=True)
https://api.github.com/repos/pandas-dev/pandas/pulls/27763
2019-08-05T20:29:30Z
2019-08-06T15:33:38Z
2019-08-06T15:33:38Z
2019-08-06T15:33:38Z
TST: missed from #27720
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index ed80e249220fd..05b58b0eca9b8 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -1038,10 +1038,6 @@ def test_replace_series(self, how, to_key, from_key): "from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"] ) def test_replace_series_datetime_tz(self, how, to_key, from_key): - how = "series" - from_key = "datetime64[ns, US/Eastern]" - to_key = "timedelta64[ns]" - index = pd.Index([3, 4], name="xyz") obj = pd.Series(self.rep[from_key], index=index, name="yyy") assert obj.dtype == from_key
pointed out by @simonjayhawkins
https://api.github.com/repos/pandas-dev/pandas/pulls/27759
2019-08-05T14:20:16Z
2019-08-05T15:03:45Z
2019-08-05T15:03:45Z
2019-08-05T16:44:44Z
Slightly rephrase SPSS doc
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 8e5352c337072..947bf15a49c7a 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5491,30 +5491,29 @@ The top-level function :func:`read_spss` can read (but not write) SPSS `sav` (.sav) and `zsav` (.zsav) format files. SPSS files contain column names. By default the -whole file is read, categorical columns are converted into ``pd.Categorical`` +whole file is read, categorical columns are converted into ``pd.Categorical``, and a ``DataFrame`` with all columns is returned. -Specify a ``usecols`` to obtain a subset of columns. Specify ``convert_categoricals=False`` +Specify the ``usecols`` parameter to obtain a subset of columns. Specify ``convert_categoricals=False`` to avoid converting categorical columns into ``pd.Categorical``. -Read a spss file: +Read an SPSS file: .. code-block:: python - df = pd.read_spss('spss_data.zsav') + df = pd.read_spss('spss_data.sav') -Extract a subset of columns ``usecols`` from SPSS file and +Extract a subset of columns contained in ``usecols`` from an SPSS file and avoid converting categorical columns into ``pd.Categorical``: .. code-block:: python - df = pd.read_spss('spss_data.zsav', usecols=['foo', 'bar'], + df = pd.read_spss('spss_data.sav', usecols=['foo', 'bar'], convert_categoricals=False) -More info_ about the sav and zsav file format is available from the IBM -web site. +More information about the `sav` and `zsav` file format is available here_. -.. _info: https://www.ibm.com/support/knowledgecenter/en/SSLVMB_22.0.0/com.ibm.spss.statistics.help/spss/base/savedatatypes.htm +.. _here: https://www.ibm.com/support/knowledgecenter/en/SSLVMB_22.0.0/com.ibm.spss.statistics.help/spss/base/savedatatypes.htm .. _io.other:
As a follow-up to #27594 (which I couldn't review in due time), here are some minor improvements for the SPSS user guide text.
https://api.github.com/repos/pandas-dev/pandas/pulls/27754
2019-08-05T12:15:09Z
2019-08-05T13:27:42Z
2019-08-05T13:27:42Z
2019-08-05T13:29:11Z
Backport PR #27720 on branch 0.25.x (BUG: fix replace_list)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index b5bd83fd17530..4d9ee4c676759 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -152,7 +152,7 @@ ExtensionArray Other ^^^^^ - +- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when replacing timezone-aware timestamps using a dict-like replacer (:issue:`27720`) - - - diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 19f126c36cde7..9aced760725be 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6684,9 +6684,8 @@ def replace( else: # need a non-zero len on all axes - for a in self._AXIS_ORDERS: - if not len(self._get_axis(a)): - return self + if not self.size: + return self new_data = self._data if is_dict_like(to_replace): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 2e7280eeae0e2..c47f3909973ac 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -7,7 +7,7 @@ import numpy as np -from pandas._libs import internals as libinternals, lib +from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -602,9 +602,10 @@ def comp(s, regex=False): """ if isna(s): return isna(values) - if hasattr(s, "asm8"): + if isinstance(s, (Timedelta, Timestamp)) and getattr(s, "tz", None) is None: + return _compare_or_regex_search( - maybe_convert_objects(values), getattr(s, "asm8"), regex + maybe_convert_objects(values), s.asm8, regex ) return _compare_or_regex_search(values, s, regex) diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index dea1d5114f1b9..ed80e249220fd 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -1029,22 +1029,20 @@ def test_replace_series(self, how, to_key, from_key): tm.assert_series_equal(result, exp) - # 
TODO(jbrockmendel) commented out to only have a single xfail printed - @pytest.mark.xfail( - reason="GH #18376, tzawareness-compat bug in BlockManager.replace_list" + @pytest.mark.parametrize("how", ["dict", "series"]) + @pytest.mark.parametrize( + "to_key", + ["timedelta64[ns]", "bool", "object", "complex128", "float64", "int64"], ) - # @pytest.mark.parametrize('how', ['dict', 'series']) - # @pytest.mark.parametrize('to_key', ['timedelta64[ns]', 'bool', 'object', - # 'complex128', 'float64', 'int64']) - # @pytest.mark.parametrize('from_key', ['datetime64[ns, UTC]', - # 'datetime64[ns, US/Eastern]']) - # def test_replace_series_datetime_tz(self, how, to_key, from_key): - def test_replace_series_datetime_tz(self): + @pytest.mark.parametrize( + "from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"] + ) + def test_replace_series_datetime_tz(self, how, to_key, from_key): how = "series" from_key = "datetime64[ns, US/Eastern]" to_key = "timedelta64[ns]" - index = pd.Index([3, 4], name="xxx") + index = pd.Index([3, 4], name="xyz") obj = pd.Series(self.rep[from_key], index=index, name="yyy") assert obj.dtype == from_key @@ -1061,24 +1059,17 @@ def test_replace_series_datetime_tz(self): tm.assert_series_equal(result, exp) - # TODO(jreback) commented out to only have a single xfail printed - @pytest.mark.xfail( - reason="different tz, currently mask_missing raises SystemError", strict=False + @pytest.mark.parametrize("how", ["dict", "series"]) + @pytest.mark.parametrize( + "to_key", + ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"], ) - # @pytest.mark.parametrize('how', ['dict', 'series']) - # @pytest.mark.parametrize('to_key', [ - # 'datetime64[ns]', 'datetime64[ns, UTC]', - # 'datetime64[ns, US/Eastern]']) - # @pytest.mark.parametrize('from_key', [ - # 'datetime64[ns]', 'datetime64[ns, UTC]', - # 'datetime64[ns, US/Eastern]']) - # def test_replace_series_datetime_datetime(self, how, to_key, from_key): - def 
test_replace_series_datetime_datetime(self): - how = "dict" - to_key = "datetime64[ns]" - from_key = "datetime64[ns]" - - index = pd.Index([3, 4], name="xxx") + @pytest.mark.parametrize( + "from_key", + ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"], + ) + def test_replace_series_datetime_datetime(self, how, to_key, from_key): + index = pd.Index([3, 4], name="xyz") obj = pd.Series(self.rep[from_key], index=index, name="yyy") assert obj.dtype == from_key
Backport PR #27720: BUG: fix replace_list
https://api.github.com/repos/pandas-dev/pandas/pulls/27753
2019-08-05T11:58:58Z
2019-08-05T12:56:44Z
2019-08-05T12:56:44Z
2019-08-05T12:56:44Z
Backport PR #27715 on branch 0.25.x (TST: troubleshoot inconsistent xfails)
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index c9597505fa596..5ecd641fc68be 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -11,6 +11,7 @@ import struct import sys +PY35 = sys.version_info[:2] == (3, 5) PY36 = sys.version_info >= (3, 6) PY37 = sys.version_info >= (3, 7) PYPY = platform.python_implementation() == "PyPy" diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 6037273450a1c..5a1699c9292ef 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -705,6 +705,7 @@ def test_comparison_tzawareness_compat_scalars(self, op, box_with_array): # Raising in __eq__ will fallback to NumPy, which warns, fails, # then re-raises the original exception. So we just need to ignore. @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning") + @pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning") def test_scalar_comparison_tzawareness( self, op, other, tz_aware_fixture, box_with_array ): diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 8c0930c044838..c500760fa1390 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -1789,9 +1789,10 @@ def test_result_types(self): self.check_result_type(np.float32, np.float32) self.check_result_type(np.float64, np.float64) - def test_result_types2(self): + @td.skip_if_windows + def test_result_complex128(self): # xref https://github.com/pandas-dev/pandas/issues/12293 - pytest.skip("unreliable tests on complex128") + # this fails on Windows, apparently a floating point precision issue # Did not test complex64 because DataFrame is converting it to # complex128. 
Due to https://github.com/pandas-dev/pandas/issues/10952 diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index 9a7a43cff0c27..a60607d586ada 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -142,16 +142,6 @@ def test_divmod_series_array(self): # skipping because it is not implemented pass - @pytest.mark.xfail(reason="different implementation", strict=False) - def test_direct_arith_with_series_returns_not_implemented(self, data): - # Right now, we have trouble with this. Returning NotImplemented - # fails other tests like - # tests/arithmetic/test_datetime64::TestTimestampSeriesArithmetic:: - # test_dt64_seris_add_intlike - return super( - TestArithmeticOps, self - ).test_direct_arith_with_series_returns_not_implemented(data) - class TestCasting(BaseDatetimeTests, base.BaseCastingTests): pass @@ -163,12 +153,6 @@ def _compare_other(self, s, data, op_name, other): # with (some) integers, depending on the value. 
pass - @pytest.mark.xfail(reason="different implementation", strict=False) - def test_direct_arith_with_series_returns_not_implemented(self, data): - return super( - TestComparisonOps, self - ).test_direct_arith_with_series_returns_not_implemented(data) - class TestMissing(BaseDatetimeTests, base.BaseMissingTests): pass diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index d5c66f0c1dd64..e99208ac78e15 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1819,10 +1819,17 @@ def test_any_all_bool_only(self): (np.any, {"A": pd.Series([0, 1], dtype="category")}, True), (np.all, {"A": pd.Series([1, 2], dtype="category")}, True), (np.any, {"A": pd.Series([1, 2], dtype="category")}, True), - # # Mix - # GH 21484 - # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'), - # 'B': pd.Series([10, 20], dtype='m8[ns]')}, True), + # Mix GH#21484 + pytest.param( + np.all, + { + "A": pd.Series([10, 20], dtype="M8[ns]"), + "B": pd.Series([10, 20], dtype="m8[ns]"), + }, + True, + # In 1.13.3 and 1.14 np.all(df) returns a Timedelta here + marks=[td.skip_if_np_lt("1.15")], + ), ], ) def test_any_all_np_func(self, func, data, expected): diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 486b3b28b29a3..9b8c8e6d8a077 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -4,8 +4,6 @@ import numpy as np import pytest -from pandas.compat import PY37 - import pandas as pd from pandas import ( Categorical, @@ -209,7 +207,7 @@ def test_level_get_group(observed): assert_frame_equal(result, expected) -@pytest.mark.xfail(PY37, reason="flaky on 3.7, xref gh-21636", strict=False) +# GH#21636 previously flaky on py37 @pytest.mark.parametrize("ordered", [True, False]) def test_apply(ordered): # GH 10138 diff --git a/pandas/tests/indexes/datetimes/test_construction.py 
b/pandas/tests/indexes/datetimes/test_construction.py index 66a22ae7e9e46..88bc11c588673 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -759,6 +759,8 @@ def test_constructor_with_int_tz(self, klass, box, tz, dtype): assert result == expected # This is the desired future behavior + # Note: this xfail is not strict because the test passes with + # None or any of the UTC variants for tz_naive_fixture @pytest.mark.xfail(reason="Future behavior", strict=False) @pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning") def test_construction_int_rountrip(self, tz_naive_fixture): @@ -766,7 +768,7 @@ def test_construction_int_rountrip(self, tz_naive_fixture): # TODO(GH-24559): Remove xfail tz = tz_naive_fixture result = 1293858000000000000 - expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0] + expected = DatetimeIndex([result], tz=tz).asi8[0] assert result == expected def test_construction_from_replaced_timestamps_with_dst(self): diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 10d422e8aa52c..8db15709da35d 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -741,10 +741,7 @@ def test_to_datetime_tz_psycopg2(self, cache): ) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize( - "cache", - [pytest.param(True, marks=pytest.mark.skipif(True, reason="GH 18111")), False], - ) + @pytest.mark.parametrize("cache", [True, False]) def test_datetime_bool(self, cache): # GH13176 with pytest.raises(TypeError): diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index c6485ff21bcfb..ee236a8253b01 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -340,7 +340,6 @@ def test_to_csv_string_array_ascii(self): with open(path, "r") as f: assert f.read() == expected_ascii - 
@pytest.mark.xfail(strict=False) def test_to_csv_string_array_utf8(self): # GH 10813 str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}] diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index a04fb9fd50257..d634859e72d7b 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -33,6 +33,10 @@ except ImportError: _HAVE_FASTPARQUET = False +pytestmark = pytest.mark.filterwarnings( + "ignore:RangeIndex.* is deprecated:DeprecationWarning" +) + # setup engines & skips @pytest.fixture( @@ -408,8 +412,6 @@ def test_basic(self, pa, df_full): check_round_trip(df, pa) - # TODO: This doesn't fail on all systems; track down which - @pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)", strict=False) def test_basic_subset_columns(self, pa, df_full): # GH18628 diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index e3bc3d452f038..69070ea11e478 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1098,7 +1098,6 @@ def test_time(self): assert xp == rs @pytest.mark.slow - @pytest.mark.xfail(strict=False, reason="Unreliable test") def test_time_change_xlim(self): t = datetime(1, 1, 1, 3, 30, 0) deltas = np.random.randint(1, 20, 3).cumsum() diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 4404b93e86218..b57b817461788 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -10,6 +10,7 @@ from pandas._libs.tslibs.parsing import DateParseError from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz +from pandas.compat import PY35 from pandas.compat.numpy import np_datetime64_compat import pandas as pd @@ -1579,8 +1580,9 @@ def test_period_immutable(): per.freq = 2 * freq -# TODO: This doesn't fail on all systems; track 
down which -@pytest.mark.xfail(reason="Parses as Jan 1, 0007 on some systems", strict=False) +@pytest.mark.xfail( + PY35, reason="Parsing as Period('0007-01-01', 'D') for reasons unknown", strict=True +) def test_small_year_parsing(): per1 = Period("0001-01-07", "D") assert per1.year == 1 diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 32d32a5d14fb2..3a5a387b919be 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1489,7 +1489,7 @@ def test_value_counts_with_nan(self): "unicode_", "timedelta64[h]", pytest.param( - "datetime64[D]", marks=pytest.mark.xfail(reason="GH#7996", strict=False) + "datetime64[D]", marks=pytest.mark.xfail(reason="GH#7996", strict=True) ), ], ) diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/sparse/test_combine_concat.py index d7295c4bfe5f0..c553cd3fd1a7a 100644 --- a/pandas/tests/sparse/test_combine_concat.py +++ b/pandas/tests/sparse/test_combine_concat.py @@ -440,7 +440,7 @@ def test_concat_sparse_dense_rows(self, fill_value, sparse_idx, dense_idx): "fill_value,sparse_idx,dense_idx", itertools.product([None, 0, 1, np.nan], [0, 1], [1, 0]), ) - @pytest.mark.xfail(reason="The iloc fails and I can't make expected", strict=False) + @pytest.mark.xfail(reason="The iloc fails and I can't make expected", strict=True) def test_concat_sparse_dense_cols(self, fill_value, sparse_idx, dense_idx): # See GH16874, GH18914 and #18686 for why this should be a DataFrame from pandas.core.dtypes.common import is_sparse diff --git a/pandas/tests/sparse/test_pivot.py b/pandas/tests/sparse/test_pivot.py index 85b899dfe76d5..880c1c55f9f79 100644 --- a/pandas/tests/sparse/test_pivot.py +++ b/pandas/tests/sparse/test_pivot.py @@ -2,7 +2,6 @@ import pytest import pandas as pd -from pandas import _np_version_under1p17 import pandas.util.testing as tm @@ -49,11 +48,6 @@ def test_pivot_table_with_nans(self): ) tm.assert_frame_equal(res_sparse, 
res_dense) - @pytest.mark.xfail( - not _np_version_under1p17, - reason="failing occasionally on numpy > 1.17", - strict=False, - ) def test_pivot_table_multi(self): res_sparse = pd.pivot_table( self.sparse, index="A", columns="B", values=["D", "E"]
Backport PR #27715: TST: troubleshoot inconsistent xfails
https://api.github.com/repos/pandas-dev/pandas/pulls/27752
2019-08-05T11:55:58Z
2019-08-05T12:56:32Z
2019-08-05T12:56:32Z
2019-08-05T12:56:32Z
REF: combine dispatch_to_index_op into dispatch_to_extension_op
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 01bfbed1aab4c..261660dda6fdd 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -37,6 +37,7 @@ from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeArray, + ABCDatetimeIndex, ABCIndex, ABCIndexClass, ABCSeries, @@ -90,7 +91,7 @@ def get_op_result_name(left, right): name : object Usually a string """ - # `left` is always a pd.Series when called from within ops + # `left` is always a Series when called from within ops if isinstance(right, (ABCSeries, ABCIndexClass)): name = _maybe_match_name(left, right) else: @@ -609,42 +610,6 @@ def column_op(a, b): return result -def dispatch_to_index_op(op, left, right, index_class): - """ - Wrap Series left in the given index_class to delegate the operation op - to the index implementation. DatetimeIndex and TimedeltaIndex perform - type checking, timezone handling, overflow checks, etc. - - Parameters - ---------- - op : binary operator (operator.add, operator.sub, ...) - left : Series - right : object - index_class : DatetimeIndex or TimedeltaIndex - - Returns - ------- - result : object, usually DatetimeIndex, TimedeltaIndex, or Series - """ - left_idx = index_class(left) - - # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes, - # left_idx may inherit a freq from a cached DatetimeIndex. - # See discussion in GH#19147. - if getattr(left_idx, "freq", None) is not None: - left_idx = left_idx._shallow_copy(freq=None) - try: - result = op(left_idx, right) - except NullFrequencyError: - # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError - # on add/sub of integers (or int-like). We re-raise as a TypeError. 
- raise TypeError( - "incompatible type for a datetime/timedelta " - "operation [{name}]".format(name=op.__name__) - ) - return result - - def dispatch_to_extension_op(op, left, right): """ Assume that left or right is a Series backed by an ExtensionArray, @@ -665,13 +630,16 @@ def dispatch_to_extension_op(op, left, right): else: new_right = right - res_values = op(new_left, new_right) - res_name = get_op_result_name(left, right) - - if op.__name__ in ["divmod", "rdivmod"]: - return _construct_divmod_result(left, res_values, left.index, res_name) - - return _construct_result(left, res_values, left.index, res_name) + try: + res_values = op(new_left, new_right) + except NullFrequencyError: + # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError + # on add/sub of integers (or int-like). We re-raise as a TypeError. + raise TypeError( + "incompatible type for a datetime/timedelta " + "operation [{name}]".format(name=op.__name__) + ) + return res_values # ----------------------------------------------------------------------------- @@ -993,22 +961,22 @@ def wrapper(left, right): ) elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left): - # Give dispatch_to_index_op a chance for tests like - # test_dt64_series_add_intlike, which the index dispatching handles - # specifically. - result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex) - return construct_result( - left, result, index=left.index, name=res_name, dtype=result.dtype - ) + from pandas.core.arrays import DatetimeArray + + result = dispatch_to_extension_op(op, DatetimeArray(left), right) + return construct_result(left, result, index=left.index, name=res_name) elif is_extension_array_dtype(left) or ( is_extension_array_dtype(right) and not is_scalar(right) ): # GH#22378 disallow scalar to exclude e.g. 
"category", "Int64" - return dispatch_to_extension_op(op, left, right) + result = dispatch_to_extension_op(op, left, right) + return construct_result(left, result, index=left.index, name=res_name) elif is_timedelta64_dtype(left): - result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex) + from pandas.core.arrays import TimedeltaArray + + result = dispatch_to_extension_op(op, TimedeltaArray(left), right) return construct_result(left, result, index=left.index, name=res_name) elif is_timedelta64_dtype(right): @@ -1022,7 +990,7 @@ def wrapper(left, right): # does inference in the case where `result` has object-dtype. return construct_result(left, result, index=left.index, name=res_name) - elif isinstance(right, (ABCDatetimeArray, pd.DatetimeIndex)): + elif isinstance(right, (ABCDatetimeArray, ABCDatetimeIndex)): result = op(left._values, right) return construct_result(left, result, index=left.index, name=res_name) @@ -1129,20 +1097,23 @@ def wrapper(self, other, axis=None): raise ValueError("Can only compare identically-labeled Series objects") elif is_categorical_dtype(self): - # Dispatch to Categorical implementation; pd.CategoricalIndex + # Dispatch to Categorical implementation; CategoricalIndex # behavior is non-canonical GH#19513 - res_values = dispatch_to_index_op(op, self, other, pd.Categorical) + res_values = dispatch_to_extension_op(op, self, other) return self._constructor(res_values, index=self.index, name=res_name) elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self): # Dispatch to DatetimeIndex to ensure identical # Series/Index behavior + from pandas.core.arrays import DatetimeArray - res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex) + res_values = dispatch_to_extension_op(op, DatetimeArray(self), other) return self._constructor(res_values, index=self.index, name=res_name) elif is_timedelta64_dtype(self): - res_values = dispatch_to_index_op(op, self, other, pd.TimedeltaIndex) + from pandas.core.arrays import 
TimedeltaArray + + res_values = dispatch_to_extension_op(op, TimedeltaArray(self), other) return self._constructor(res_values, index=self.index, name=res_name) elif is_extension_array_dtype(self) or ( @@ -1150,7 +1121,8 @@ def wrapper(self, other, axis=None): ): # Note: the `not is_scalar(other)` condition rules out # e.g. other == "category" - return dispatch_to_extension_op(op, self, other) + res_values = dispatch_to_extension_op(op, self, other) + return self._constructor(res_values, index=self.index).rename(res_name) elif isinstance(other, ABCSeries): # By this point we have checked that self._indexed_same(other)
Took a couple of preliminaries, but now we can merge these two together and we're within striking distance of being able to collapse all of the Series ops into much simpler functions.
https://api.github.com/repos/pandas-dev/pandas/pulls/27747
2019-08-04T23:48:14Z
2019-08-05T11:44:37Z
2019-08-05T11:44:37Z
2019-08-05T14:29:57Z
CLN: Move base.StringMixin to computations.common
diff --git a/pandas/core/base.py b/pandas/core/base.py index 38a8bf7171521..7d2a62318232c 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -46,30 +46,6 @@ ) -class StringMixin: - """ - Implements string methods so long as object defines a `__str__` method. - """ - - # side note - this could be made into a metaclass if more than one - # object needs - - # ---------------------------------------------------------------------- - # Formatting - - def __str__(self): - """ - Return a string representation for a particular Object - """ - raise AbstractMethodError(self) - - def __repr__(self): - """ - Return a string representation for a particular object. - """ - return str(self) - - class PandasObject(DirNamesMixin): """baseclass for various pandas objects""" diff --git a/pandas/core/computation/common.py b/pandas/core/computation/common.py index ddb1023479cba..b8e212fd2a32e 100644 --- a/pandas/core/computation/common.py +++ b/pandas/core/computation/common.py @@ -36,3 +36,8 @@ def _remove_spaces_column_name(name): class NameResolutionError(NameError): pass + + +class StringMixin: + # TODO: delete this class. Removing this ATM caused a failure. + pass diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index e10d189bc3c6f..d0d87c23e9346 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -13,7 +13,6 @@ import pandas as pd from pandas.core import common as com -from pandas.core.base import StringMixin from pandas.core.computation.common import ( _BACKTICK_QUOTED_STRING, _remove_spaces_column_name, @@ -799,7 +798,7 @@ def __init__(self, env, engine, parser, preparser=lambda x: x): super().__init__(env, engine, parser, preparser=preparser) -class Expr(StringMixin): +class Expr: """Object encapsulating an expression. 
@@ -831,7 +830,7 @@ def assigner(self): def __call__(self): return self.terms(self.env) - def __str__(self): + def __repr__(self): return printing.pprint_thing(self.terms) def __len__(self): diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 870acc3cc9956..2bf09a553ce18 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -12,7 +12,6 @@ from pandas.core.dtypes.common import is_list_like, is_scalar -from pandas.core.base import StringMixin import pandas.core.common as com from pandas.core.computation.common import _ensure_decoded, _result_type_many from pandas.core.computation.scope import _DEFAULT_GLOBALS @@ -63,7 +62,7 @@ def __init__(self, name, is_local): super().__init__(msg.format(name)) -class Term(StringMixin): +class Term: def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, str) else cls supr_new = super(Term, klass).__new__ @@ -82,7 +81,7 @@ def __init__(self, name, env, side=None, encoding=None): def local_name(self): return self.name.replace(_LOCAL_TAG, "") - def __str__(self): + def __repr__(self): return pprint_thing(self.name) def __call__(self, *args, **kwargs): @@ -182,7 +181,7 @@ def _resolve_name(self): def name(self): return self.value - def __str__(self): + def __repr__(self): # in python 2 str() of float # can truncate shorter than repr() return repr(self.name) @@ -191,7 +190,7 @@ def __str__(self): _bool_op_map = {"not": "~", "and": "&", "or": "|"} -class Op(StringMixin): +class Op: """Hold an operator of arbitrary arity """ @@ -204,7 +203,7 @@ def __init__(self, op, operands, *args, **kwargs): def __iter__(self): return iter(self.operands) - def __str__(self): + def __repr__(self): """Print a generic n-ary operator and its operands using infix notation""" # recurse over the operands @@ -537,7 +536,7 @@ def __call__(self, env): operand = self.operand(env) return self.func(operand) - def __str__(self): + def __repr__(self): return 
pprint_thing("{0}({1})".format(self.op, self.operand)) @property @@ -562,7 +561,7 @@ def __call__(self, env): with np.errstate(all="ignore"): return self.func.func(*operands) - def __str__(self): + def __repr__(self): operands = map(str, self.operands) return pprint_thing("{0}({1})".format(self.op, ",".join(operands))) diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 60cf35163bcf4..1523eb05ac41d 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -11,7 +11,6 @@ from pandas.core.dtypes.common import is_list_like import pandas as pd -from pandas.core.base import StringMixin import pandas.core.common as com from pandas.core.computation import expr, ops from pandas.core.computation.common import _ensure_decoded @@ -32,8 +31,7 @@ def __init__(self, level, global_dict=None, local_dict=None, queryables=None): class Term(ops.Term): def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, str) else cls - supr_new = StringMixin.__new__ - return supr_new(klass) + return object.__new__(klass) def __init__(self, name, env, side=None, encoding=None): super().__init__(name, env, side=side, encoding=encoding) @@ -231,7 +229,7 @@ def convert_values(self): class FilterBinOp(BinOp): - def __str__(self): + def __repr__(self): return pprint_thing( "[Filter : [{lhs}] -> [{op}]".format(lhs=self.filter[0], op=self.filter[1]) ) @@ -297,7 +295,7 @@ def evaluate(self): class ConditionBinOp(BinOp): - def __str__(self): + def __repr__(self): return pprint_thing("[Condition : [{cond}]]".format(cond=self.condition)) def invert(self): @@ -548,7 +546,7 @@ def __init__(self, where, queryables=None, encoding=None, scope_level=0): ) self.terms = self.parse() - def __str__(self): + def __repr__(self): if self.terms is not None: return pprint_thing(self.terms) return pprint_thing(self.expr) diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py index 
4d5a523337f66..8ddd0dd7622e7 100644 --- a/pandas/core/computation/scope.py +++ b/pandas/core/computation/scope.py @@ -15,8 +15,8 @@ from pandas._libs.tslibs import Timestamp from pandas.compat.chainmap import DeepChainMap -from pandas.core.base import StringMixin import pandas.core.computation as compu +from pandas.core.computation.common import StringMixin def _ensure_scope( @@ -141,7 +141,7 @@ def __init__( self.resolvers = DeepChainMap(*resolvers) self.temps = {} - def __str__(self): + def __repr__(self): scope_keys = _get_pretty_string(list(self.scope.keys())) res_keys = _get_pretty_string(list(self.resolvers.keys())) unicode_str = "{name}(scope={scope_keys}, resolvers={res_keys})" diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index 3e8f653c47424..3c6da304dd68d 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -14,7 +14,6 @@ period_range, timedelta_range, ) -from pandas.core.base import StringMixin from pandas.core.index import MultiIndex import pandas.util.testing as tm @@ -226,11 +225,11 @@ class TestCategoricalRepr: def test_categorical_repr_unicode(self): # see gh-21002 - class County(StringMixin): + class County: name = "San Sebastián" state = "PR" - def __str__(self): + def __repr__(self): return self.name + ", " + self.state cat = pd.Categorical([County() for _ in range(61)])
``StringMixin`` should be removed, but I can't figure out how to remove it without breaking tests, so this moves ``StringMixin`` to core.computation, which is the only module, where it is currently used. This makes ``core.base.py`` a bit cleaner.
https://api.github.com/repos/pandas-dev/pandas/pulls/27746
2019-08-04T23:17:33Z
2019-08-05T11:41:53Z
2019-08-05T11:41:53Z
2019-08-05T13:25:11Z
Backport PR #27702 on branch 0.25.x (BUG: Concatenation warning still appears with sort=False)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index c80195af413f7..01e4046e8b743 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -125,7 +125,7 @@ Reshaping ^^^^^^^^^ - A ``KeyError`` is now raised if ``.unstack()`` is called on a :class:`Series` or :class:`DataFrame` with a flat :class:`Index` passing a name which is not the correct one (:issue:`18303`) -- +- :meth:`DataFrame.join` now suppresses the ``FutureWarning`` when the sort parameter is specified (:issue:`21952`) - Sparse diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 245e41ed16eb2..0570b9af2d256 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7274,10 +7274,14 @@ def _join_compat( # join indexes only using concat if can_concat: if how == "left": - res = concat(frames, axis=1, join="outer", verify_integrity=True) + res = concat( + frames, axis=1, join="outer", verify_integrity=True, sort=sort + ) return res.reindex(self.index, copy=False) else: - return concat(frames, axis=1, join=how, verify_integrity=True) + return concat( + frames, axis=1, join=how, verify_integrity=True, sort=sort + ) joined = frames[0] diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py index adace5e4784ae..220968d4b3d29 100644 --- a/pandas/tests/frame/test_join.py +++ b/pandas/tests/frame/test_join.py @@ -193,3 +193,32 @@ def test_join_left_sequence_non_unique_index(): ) tm.assert_frame_equal(joined, expected) + + +@pytest.mark.parametrize("sort_kw", [True, False, None]) +def test_suppress_future_warning_with_sort_kw(sort_kw): + a = DataFrame({"col1": [1, 2]}, index=["c", "a"]) + + b = DataFrame({"col2": [4, 5]}, index=["b", "a"]) + + c = DataFrame({"col3": [7, 8]}, index=["a", "b"]) + + expected = DataFrame( + { + "col1": {"a": 2.0, "b": float("nan"), "c": 1.0}, + "col2": {"a": 5.0, "b": 4.0, "c": float("nan")}, + "col3": {"a": 7.0, "b": 8.0, "c": float("nan")}, + } + ) + if sort_kw is False: + expected = 
expected.reindex(index=["c", "a", "b"]) + + if sort_kw is None: + # only warn if not explicitly specified + ctx = tm.assert_produces_warning(FutureWarning, check_stacklevel=False) + else: + ctx = tm.assert_produces_warning(None, check_stacklevel=False) + + with ctx: + result = a.join([b, c], how="outer", sort=sort_kw) + tm.assert_frame_equal(result, expected)
Backport PR #27702: BUG: Concatenation warning still appears with sort=False
https://api.github.com/repos/pandas-dev/pandas/pulls/27743
2019-08-04T21:59:22Z
2019-08-05T06:35:41Z
2019-08-05T06:35:41Z
2019-08-05T06:35:41Z
Backport PR #27712 on branch 0.25.x (BUG: partial string indexing with scalar)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index c80195af413f7..3097bfa21f9e1 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -82,7 +82,7 @@ Interval Indexing ^^^^^^^^ -- +- Bug in partial-string indexing returning a NumPy array rather than a ``Series`` when indexing with a scalar like ``.loc['2015']`` (:issue:`27516`) - - diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 12923fd790972..17122d0981995 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -263,6 +263,9 @@ def _outer_indexer(self, left, right): _infer_as_myclass = False _engine_type = libindex.ObjectEngine + # whether we support partial string indexing. Overridden + # in DatetimeIndex and PeriodIndex + _supports_partial_string_indexing = False _accessors = {"str"} diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 5024eebe03bb4..ab4975c0b359a 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -238,6 +238,7 @@ def _join_i8_wrapper(joinf, **kwargs): ) _engine_type = libindex.DatetimeEngine + _supports_partial_string_indexing = True _tz = None _freq = None diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 47cf0f26f9ca5..96031645365c6 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -173,6 +173,7 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin): _data = None _engine_type = libindex.PeriodEngine + _supports_partial_string_indexing = True # ------------------------------------------------------------------------ # Index Constructors diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 5aee37bc3b833..8f242f0ae7d7c 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1726,6 +1726,11 @@ def _is_scalar_access(self, key: Tuple): if isinstance(ax, MultiIndex): return False + if isinstance(k, 
str) and ax._supports_partial_string_indexing: + # partial string indexing, df.loc['2000', 'A'] + # should not be considered scalar + return False + if not ax.is_unique: return False @@ -1741,7 +1746,10 @@ def _get_partial_string_timestamp_match_key(self, key, labels): """Translate any partial string timestamp matches in key, returning the new key (GH 10331)""" if isinstance(labels, MultiIndex): - if isinstance(key, str) and labels.levels[0].is_all_dates: + if ( + isinstance(key, str) + and labels.levels[0]._supports_partial_string_indexing + ): # Convert key '2016-01-01' to # ('2016-01-01'[, slice(None, None, None)]+) key = tuple([key] + [slice(None)] * (len(labels.levels) - 1)) @@ -1751,7 +1759,10 @@ def _get_partial_string_timestamp_match_key(self, key, labels): # (..., slice('2016-01-01', '2016-01-01', None), ...) new_key = [] for i, component in enumerate(key): - if isinstance(component, str) and labels.levels[i].is_all_dates: + if ( + isinstance(component, str) + and labels.levels[i]._supports_partial_string_indexing + ): new_key.append(slice(component, component, None)) else: new_key.append(component) @@ -2340,7 +2351,7 @@ def convert_to_index_sliceable(obj, key): # We might have a datetimelike string that we can translate to a # slice here via partial string indexing - if idx.is_all_dates: + if idx._supports_partial_string_indexing: try: return idx._get_string_slice(key) except (KeyError, ValueError, NotImplementedError): diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index 3095bf9657277..5660fa5ffed80 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -468,3 +468,14 @@ def test_getitem_with_datestring_with_UTC_offset(self, start, end): with pytest.raises(ValueError, match="The index must be timezone"): df = df.tz_localize(None) df[start:end] + + def test_slice_reduce_to_series(self): + # GH 27516 + 
df = pd.DataFrame( + {"A": range(24)}, index=pd.date_range("2000", periods=24, freq="M") + ) + expected = pd.Series( + range(12), index=pd.date_range("2000", periods=12, freq="M"), name="A" + ) + result = df.loc["2000", "A"] + tm.assert_series_equal(result, expected)
Backport PR #27712: BUG: partial string indexing with scalar
https://api.github.com/repos/pandas-dev/pandas/pulls/27742
2019-08-04T21:55:22Z
2019-08-05T06:35:59Z
2019-08-05T06:35:59Z
2019-08-05T06:35:59Z
CLN: deprivatize names in pd.core.common
diff --git a/pandas/core/common.py b/pandas/core/common.py index c12bfecc46518..a507625ccfa01 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -165,51 +165,39 @@ def cast_scalar_indexer(val): return val -def _not_none(*args): +def not_none(*args): """ Returns a generator consisting of the arguments that are not None. """ return (arg for arg in args if arg is not None) -def _any_none(*args): +def any_none(*args): """ Returns a boolean indicating if any argument is None. """ - for arg in args: - if arg is None: - return True - return False + return any(arg is None for arg in args) -def _all_none(*args): +def all_none(*args): """ Returns a boolean indicating if all arguments are None. """ - for arg in args: - if arg is not None: - return False - return True + return all(arg is None for arg in args) -def _any_not_none(*args): +def any_not_none(*args): """ Returns a boolean indicating if any argument is not None. """ - for arg in args: - if arg is not None: - return True - return False + return any(arg is not None for arg in args) -def _all_not_none(*args): +def all_not_none(*args): """ Returns a boolean indicating if all arguments are not None. """ - for arg in args: - if arg is None: - return False - return True + return all(arg is not None for arg in args) def count_not_none(*args): @@ -447,7 +435,7 @@ def random_state(state=None): ) -def _pipe(obj, func, *args, **kwargs): +def pipe(obj, func, *args, **kwargs): """ Apply a function ``func`` to object ``obj`` either by passing obj as the first argument to the function or, in the case that the func is a tuple, @@ -482,7 +470,7 @@ def _pipe(obj, func, *args, **kwargs): return func(obj, *args, **kwargs) -def _get_rename_function(mapper): +def get_rename_function(mapper): """ Returns a function that will map names/labels, dependent if mapper is a dict, Series or just a function. 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1d87a6937ca34..9078e967b4d7e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1124,7 +1124,7 @@ def rename(self, *args, **kwargs): v = axes.get(self._AXIS_NAMES[axis]) if v is None: continue - f = com._get_rename_function(v) + f = com.get_rename_function(v) baxis = self._get_block_manager_axis(axis) if level is not None: level = self.axes[axis]._get_level_number(level) @@ -1312,7 +1312,7 @@ class name if non_mapper: newnames = v else: - f = com._get_rename_function(v) + f = com.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True) @@ -4993,7 +4993,7 @@ def sample( @Appender(_shared_docs["pipe"] % _shared_doc_kwargs) def pipe(self, func, *args, **kwargs): - return com._pipe(self, func, *args, **kwargs) + return com.pipe(self, func, *args, **kwargs) _shared_docs["aggregate"] = dedent( """ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 811836d0e8a4d..2ad85903b916b 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -361,7 +361,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # GH12824. 
def first_not_none(values): try: - return next(com._not_none(*values)) + return next(com.not_none(*values)) except StopIteration: return None diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 12b9cf25687cf..ec526b338eee1 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -590,7 +590,7 @@ def __getattr__(self, attr): ) @Appender(_pipe_template) def pipe(self, func, *args, **kwargs): - return com._pipe(self, func, *args, **kwargs) + return com.pipe(self, func, *args, **kwargs) plot = property(GroupByPlot) @@ -928,7 +928,7 @@ def _concat_objects(self, keys, values, not_indexed_same=False): def reset_identity(values): # reset the identities of the components # of the values to prevent aliasing - for v in com._not_none(*values): + for v in com.not_none(*values): ax = v._get_axis(self.axis) ax._reset_identity() return values diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index a17f74286d59f..86d55ce2e7cc3 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -283,7 +283,7 @@ def _get_consensus_names(indexes): # find the non-none names, need to tupleify to make # the set hashable, then reverse on return - consensus_names = {tuple(i.names) for i in indexes if com._any_not_none(*i.names)} + consensus_names = {tuple(i.names) for i in indexes if com.any_not_none(*i.names)} if len(consensus_names) == 1: return list(list(consensus_names)[0]) return [None] * indexes[0].nlevels diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ce7b73a92b18a..d0de995255b59 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3588,8 +3588,8 @@ def _join_multi(self, other, how, return_indexers=True): from pandas.core.reshape.merge import _restore_dropped_levels_multijoin # figure out join names - self_names = set(com._not_none(*self.names)) - other_names = set(com._not_none(*other.names)) + self_names = set(com.not_none(*self.names)) + 
other_names = set(com.not_none(*other.names)) overlap = self_names & other_names # need at least 1 in common diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index d6f0008a2646f..0f7f580e2c43e 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1569,7 +1569,7 @@ def date_range( dtype='datetime64[ns]', freq='D') """ - if freq is None and com._any_none(periods, start, end): + if freq is None and com.any_none(periods, start, end): freq = "D" dtarr = DatetimeArray._generate_range( diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index d941dc547befe..7a444683ffcb2 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1318,7 +1318,7 @@ def _is_type_compatible(a, b): (is_number(a) and is_number(b)) or (is_ts_compat(a) and is_ts_compat(b)) or (is_td_compat(a) and is_td_compat(b)) - or com._any_none(a, b) + or com.any_none(a, b) ) @@ -1416,7 +1416,7 @@ def interval_range( end = com.maybe_box_datetimelike(end) endpoint = start if start is not None else end - if freq is None and com._any_none(periods, start, end): + if freq is None and com.any_none(periods, start, end): freq = 1 if is_number(endpoint) else "D" if com.count_not_none(start, end, periods, freq) != 3: @@ -1463,7 +1463,7 @@ def interval_range( if is_number(endpoint): # force consistency between start/end/freq (lower end if freq skips it) - if com._all_not_none(start, end, freq): + if com.all_not_none(start, end, freq): end -= (end - start) % freq # compute the period/start/end if unspecified (at most one) @@ -1475,7 +1475,7 @@ def interval_range( end = start + (periods - 1) * freq breaks = np.linspace(start, end, periods) - if all(is_integer(x) for x in com._not_none(start, end, freq)): + if all(is_integer(x) for x in com.not_none(start, end, freq)): # np.linspace always produces float output breaks = maybe_downcast_to_dtype(breaks, "int64") else: diff --git 
a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 16098c474a473..1389b0e31b3bf 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -110,7 +110,7 @@ def __new__( return cls._simple_new(start, dtype=dtype, name=name) # validate the arguments - if com._all_none(start, stop, step): + if com.all_none(start, stop, step): raise TypeError("RangeIndex(...) must be called with integers") start = ensure_python_int(start) if start is not None else 0 diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index a9f49ec8bd75a..f2ce562536b95 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -780,7 +780,7 @@ def timedelta_range( '5 days 00:00:00'], dtype='timedelta64[ns]', freq=None) """ - if freq is None and com._any_none(periods, start, end): + if freq is None and com.any_none(periods, start, end): freq = "D" freq, freq_infer = dtl.maybe_infer_freq(freq) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 6d70fcfb62d52..2136d3d326db5 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -599,7 +599,7 @@ def _astype(self, dtype, copy=False, errors="raise", **kwargs): categories = kwargs.get("categories", None) ordered = kwargs.get("ordered", None) - if com._any_not_none(categories, ordered): + if com.any_not_none(categories, ordered): dtype = CategoricalDtype(categories, ordered) if is_categorical_dtype(self.values): diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index ce2d2ac41d3ec..4446f27da6be0 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -304,7 +304,7 @@ def __init__( raise ValueError("No objects to concatenate") if keys is None: - objs = list(com._not_none(*objs)) + objs = list(com.not_none(*objs)) else: # #1649 clean_keys = [] diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index fc32a8f0dd044..f45c7693bf6ed 
100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1958,7 +1958,7 @@ def _should_fill(lname, rname): def _any(x): - return x is not None and com._any_not_none(*x) + return x is not None and com.any_not_none(*x) def validate_operand(obj): diff --git a/pandas/core/series.py b/pandas/core/series.py index 106bb3c7d6cb4..9e317d365ccb8 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1182,7 +1182,7 @@ def _get_with(self, key): def _get_values_tuple(self, key): # mpl hackaround - if com._any_none(*key): + if com.any_none(*key): return self._get_values(key) if not isinstance(self.index, MultiIndex): diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 012d2d9358241..b9c847ad64c57 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -611,7 +611,7 @@ def _format_hierarchical_rows(self): self.rowcounter += 1 # if index labels are not empty go ahead and dump - if com._any_not_none(*index_labels) and self.header is not False: + if com.any_not_none(*index_labels) and self.header is not False: for cidx, name in enumerate(index_labels): yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 23c07ea72d40f..a2a0e302de5dc 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1743,7 +1743,7 @@ def _cond(values): def _has_names(index: Index) -> bool: if isinstance(index, ABCMultiIndex): - return com._any_not_none(*index.names) + return com.any_not_none(*index.names) else: return index.name is not None diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index b736b978c87a5..033d93d1456c8 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -316,7 +316,7 @@ def format_attr(pair): if ( self.data.index.names - and com._any_not_none(*self.data.index.names) + and com.any_not_none(*self.data.index.names) and not hidden_index ): index_header_row = [] 
@@ -1405,7 +1405,7 @@ def pipe(self, func, *args, **kwargs): ... .pipe(format_conversion) ... .set_caption("Results with minimum conversion highlighted.")) """ - return com._pipe(self, func, *args, **kwargs) + return com.pipe(self, func, *args, **kwargs) def _is_visible(idx_row, idx_col, lengths): diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index 1e7cd54d9f4a0..b142dbf76e6b3 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -76,7 +76,7 @@ def as_json_table_type(x): def set_default_names(data): """Sets index names to 'index' for regular, or 'level_x' for Multi""" - if com._all_not_none(*data.index.names): + if com.all_not_none(*data.index.names): nms = data.index.names if len(nms) == 1 and data.index.name == "index": warnings.warn("Index name of 'index' is not round-trippable") diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 415cb50472a4c..abc8a414eb37a 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -998,7 +998,7 @@ def remove(self, key, where=None, start=None, stop=None): return None # remove the node - if com._all_none(where, start, stop): + if com.all_none(where, start, stop): s.group._f_remove(recursive=True) # delete from the table @@ -2634,7 +2634,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs): support fully deleting the node in its entirety (only) - where specification must be None """ - if com._all_none(where, start, stop): + if com.all_none(where, start, stop): self._handle.remove_node(self.group, recursive=True) return None diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 519465802085b..c2b37bb297ecb 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -654,7 +654,7 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): def _get_index_name(self): if isinstance(self.data.index, ABCMultiIndex): name = self.data.index.names - if 
com._any_not_none(*name): + if com.any_not_none(*name): name = ",".join(pprint_thing(x) for x in name) else: name = None @@ -1054,7 +1054,7 @@ def _make_plot(self): it = self._iter_data() stacking_id = self._get_stacking_id() - is_errorbar = com._any_not_none(*self.errors.values()) + is_errorbar = com.any_not_none(*self.errors.values()) colors = self._get_colors() for i, (label, y) in enumerate(it): diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 28051d9b7f3b9..e2e4a82ff581c 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -655,7 +655,7 @@ def _make_frame(names=None): df = _make_frame(True) df.to_csv(path, index=False) result = read_csv(path, header=[0, 1]) - assert com._all_none(*result.columns.names) + assert com.all_none(*result.columns.names) result.columns.names = df.columns.names assert_frame_equal(df, result) diff --git a/pandas/tests/scalar/interval/test_interval.py b/pandas/tests/scalar/interval/test_interval.py index e4987e4483fd9..b51429d0338e3 100644 --- a/pandas/tests/scalar/interval/test_interval.py +++ b/pandas/tests/scalar/interval/test_interval.py @@ -254,6 +254,6 @@ def test_constructor_errors_tz(self, tz_left, tz_right): # GH 18538 left = Timestamp("2017-01-01", tz=tz_left) right = Timestamp("2017-01-02", tz=tz_right) - error = TypeError if com._any_none(tz_left, tz_right) else ValueError + error = TypeError if com.any_none(tz_left, tz_right) else ValueError with pytest.raises(error): Interval(left, right) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index d96f806bc383f..479e55c86fcd1 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -33,14 +33,14 @@ def __call__(self): def test_any_none(): - assert com._any_none(1, 2, 3, None) - assert not com._any_none(1, 2, 3, 4) + assert com.any_none(1, 2, 3, None) + assert not com.any_none(1, 2, 3, 4) def test_all_not_none(): - assert com._all_not_none(1, 2, 3, 4) - assert 
not com._all_not_none(1, 2, 3, None) - assert not com._all_not_none(None, None, None, None) + assert com.all_not_none(1, 2, 3, 4) + assert not com.all_not_none(1, 2, 3, None) + assert not com.all_not_none(None, None, None, None) def test_random_state():
Some clean-up, as these functions don't need to start with a "_".
https://api.github.com/repos/pandas-dev/pandas/pulls/27741
2019-08-04T20:06:22Z
2019-08-04T21:26:43Z
2019-08-04T21:26:43Z
2019-08-04T21:26:47Z
BUG: Fix NaT +/- DTA/TDA
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 7f35a11e57b71..6fab1b5c02be1 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -123,7 +123,9 @@ cdef class _NaT(datetime): return c_NaT elif getattr(other, '_typ', None) in ['dateoffset', 'series', 'period', 'datetimeindex', - 'timedeltaindex']: + 'datetimearray', + 'timedeltaindex', + 'timedeltaarray']: # Duplicate logic in _Timestamp.__add__ to avoid needing # to subclass; allows us to @final(_Timestamp.__add__) return NotImplemented @@ -151,9 +153,10 @@ cdef class _NaT(datetime): return self + neg_other elif getattr(other, '_typ', None) in ['period', 'series', - 'periodindex', 'dateoffset']: + 'periodindex', 'dateoffset', + 'datetimearray', + 'timedeltaarray']: return NotImplemented - return NaT def __pos__(self): diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index f935a7fa880c7..e7ad76cf95ba0 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -7,6 +7,8 @@ from pandas._libs.tslibs import iNaT import pandas.compat as compat +from pandas.core.dtypes.common import is_datetime64_any_dtype + from pandas import ( DatetimeIndex, Index, @@ -18,7 +20,7 @@ Timestamp, isna, ) -from pandas.core.arrays import PeriodArray +from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.util import testing as tm @@ -397,7 +399,9 @@ def test_nat_rfloordiv_timedelta(val, expected): "value", [ DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"), - DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"), + DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"), + DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"]), + DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"], tz="US/Pacific"), TimedeltaIndex(["1 day", "2 day"], name="x"), ], ) @@ -406,19 +410,24 @@ def test_nat_arithmetic_index(op_name, value): exp_name = "x" exp_data = [NaT] * 2 - if 
isinstance(value, DatetimeIndex) and "plus" in op_name: - expected = DatetimeIndex(exp_data, name=exp_name, tz=value.tz) + if is_datetime64_any_dtype(value.dtype) and "plus" in op_name: + expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name) else: expected = TimedeltaIndex(exp_data, name=exp_name) - tm.assert_index_equal(_ops[op_name](NaT, value), expected) + if not isinstance(value, Index): + expected = expected.array + + op = _ops[op_name] + result = op(NaT, value) + tm.assert_equal(result, expected) @pytest.mark.parametrize( "op_name", ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"], ) -@pytest.mark.parametrize("box", [TimedeltaIndex, Series]) +@pytest.mark.parametrize("box", [TimedeltaIndex, Series, TimedeltaArray._from_sequence]) def test_nat_arithmetic_td64_vector(op_name, box): # see gh-19124 vec = box(["1 day", "2 day"], dtype="timedelta64[ns]")
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry After this bugfix, we can change all existing uses of `dispatch_to_index_op` in `core.ops.__init__` to use `dispatch_to_extension_op`. Once we do that, we can collapse ~50 lines of typechecking-dispatching code to all use `dispatch_to_extension_op`
https://api.github.com/repos/pandas-dev/pandas/pulls/27740
2019-08-04T16:28:31Z
2019-08-04T23:01:03Z
2019-08-04T23:01:02Z
2019-08-04T23:08:43Z
BUG: fix+test PA+all-NaT TDA
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 91dd853e78c77..6203cfdf6df6b 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -714,7 +714,12 @@ def _add_delta_tdi(self, other): """ assert isinstance(self.freq, Tick) # checked by calling function - delta = self._check_timedeltalike_freq_compat(other) + if not np.all(isna(other)): + delta = self._check_timedeltalike_freq_compat(other) + else: + # all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT + return self + np.timedelta64("NaT") + return self._addsub_int_array(delta, operator.add).asi8 def _add_delta(self, other): diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index a9d18c194889c..01bfbed1aab4c 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -5,7 +5,7 @@ """ import datetime import operator -from typing import Any, Callable +from typing import Any, Callable, Tuple import numpy as np @@ -42,7 +42,6 @@ ABCSeries, ABCSparseArray, ABCSparseSeries, - ABCTimedeltaArray, ) from pandas.core.dtypes.missing import isna, notna @@ -134,7 +133,7 @@ def _maybe_match_name(a, b): return None -def maybe_upcast_for_op(obj): +def maybe_upcast_for_op(obj, shape: Tuple[int, ...]): """ Cast non-pandas objects to pandas types to unify behavior of arithmetic and comparison operations. 
@@ -142,6 +141,7 @@ def maybe_upcast_for_op(obj): Parameters ---------- obj: object + shape : tuple[int] Returns ------- @@ -157,13 +157,22 @@ def maybe_upcast_for_op(obj): # implementation; otherwise operation against numeric-dtype # raises TypeError return Timedelta(obj) - elif isinstance(obj, np.timedelta64) and not isna(obj): + elif isinstance(obj, np.timedelta64): + if isna(obj): + # wrapping timedelta64("NaT") in Timedelta returns NaT, + # which would incorrectly be treated as a datetime-NaT, so + # we broadcast and wrap in a Series + right = np.broadcast_to(obj, shape) + + # Note: we use Series instead of TimedeltaIndex to avoid having + # to worry about catching NullFrequencyError. + return pd.Series(right) + # In particular non-nanosecond timedelta64 needs to be cast to # nanoseconds, or else we get undesired behavior like # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') - # The isna check is to avoid casting timedelta64("NaT"), which would - # return NaT and incorrectly be treated as a datetime-NaT. 
return Timedelta(obj) + elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj): # GH#22390 Unfortunately we need to special-case right-hand # timedelta64 dtypes because numpy casts integer dtypes to @@ -975,7 +984,7 @@ def wrapper(left, right): left, right = _align_method_SERIES(left, right) res_name = get_op_result_name(left, right) - right = maybe_upcast_for_op(right) + right = maybe_upcast_for_op(right, left.shape) if is_categorical_dtype(left): raise TypeError( @@ -1003,31 +1012,11 @@ def wrapper(left, right): return construct_result(left, result, index=left.index, name=res_name) elif is_timedelta64_dtype(right): - # We should only get here with non-scalar or timedelta64('NaT') - # values for right - # Note: we cannot use dispatch_to_index_op because - # that may incorrectly raise TypeError when we - # should get NullFrequencyError - orig_right = right - if is_scalar(right): - # broadcast and wrap in a TimedeltaIndex - assert np.isnat(right) - right = np.broadcast_to(right, left.shape) - right = pd.TimedeltaIndex(right) - - assert isinstance(right, (pd.TimedeltaIndex, ABCTimedeltaArray, ABCSeries)) - try: - result = op(left._values, right) - except NullFrequencyError: - if orig_right is not right: - # i.e. scalar timedelta64('NaT') - # We get a NullFrequencyError because we broadcast to - # TimedeltaIndex, but this should be TypeError. - raise TypeError( - "incompatible type for a datetime/timedelta " - "operation [{name}]".format(name=op.__name__) - ) - raise + # We should only get here with non-scalar values for right + # upcast by maybe_upcast_for_op + assert not isinstance(right, (np.timedelta64, np.ndarray)) + + result = op(left._values, right) # We do not pass dtype to ensure that the Series constructor # does inference in the case where `result` has object-dtype. 
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index e54c16c7a27a4..c1b32e8b13442 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -12,6 +12,7 @@ import pandas as pd from pandas import Period, PeriodIndex, Series, period_range from pandas.core import ops +from pandas.core.arrays import TimedeltaArray import pandas.util.testing as tm from pandas.tseries.frequencies import to_offset @@ -1013,6 +1014,33 @@ def test_parr_add_sub_td64_nat(self, box_transpose_fail): with pytest.raises(TypeError): other - obj + @pytest.mark.parametrize( + "other", + [ + np.array(["NaT"] * 9, dtype="m8[ns]"), + TimedeltaArray._from_sequence(["NaT"] * 9), + ], + ) + def test_parr_add_sub_tdt64_nat_array(self, box_df_fail, other): + # FIXME: DataFrame fails because when when operating column-wise + # timedelta64 entries become NaT and are treated like datetimes + box = box_df_fail + + pi = pd.period_range("1994-04-01", periods=9, freq="19D") + expected = pd.PeriodIndex(["NaT"] * 9, freq="19D") + + obj = tm.box_expected(pi, box) + expected = tm.box_expected(expected, box) + + result = obj + other + tm.assert_equal(result, expected) + result = other + obj + tm.assert_equal(result, expected) + result = obj - other + tm.assert_equal(result, expected) + with pytest.raises(TypeError): + other - obj + class TestPeriodSeriesArithmetic: def test_ops_series_timedelta(self):
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry In conjunction with one more branch coming up, we're going to simplify the tar out of Series arithmetic ops.
https://api.github.com/repos/pandas-dev/pandas/pulls/27739
2019-08-04T16:05:31Z
2019-08-04T21:40:45Z
2019-08-04T21:40:45Z
2019-08-04T23:11:29Z
DOC: update docstrings following refactor of buffer handling
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1d87a6937ca34..d09d11d20e137 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -68,6 +68,7 @@ from pandas.core.internals import BlockManager from pandas.core.ops import _align_method_FRAME +from pandas.io.formats import format as fmt from pandas.io.formats.format import DataFrameFormatter, format_percentiles from pandas.io.formats.printing import pprint_thing from pandas.tseries.frequencies import to_offset @@ -2881,6 +2882,7 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal' else: return xarray.Dataset.from_dataframe(self) + @Substitution(returns=fmt.return_docstring) def to_latex( self, buf=None, @@ -2914,7 +2916,7 @@ def to_latex( Parameters ---------- - buf : file descriptor or None + buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. @@ -2979,13 +2981,7 @@ def to_latex( from the pandas config module. .. versionadded:: 0.20.0 - - Returns - ------- - str or None - If buf is None, returns the resulting LateX format as a - string. Otherwise returns None. - + %(returns)s See Also -------- DataFrame.to_string : Render a DataFrame to a console-friendly diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 23c07ea72d40f..6cb224d7722d8 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -85,8 +85,8 @@ common_docstring = """ Parameters ---------- - buf : StringIO-like, optional - Buffer to write to. + buf : str, Path or StringIO-like, optional, default None + Buffer to write to. If None, the output is returned as a string. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. 
col_space : %(col_space_type)s, optional @@ -156,8 +156,9 @@ return_docstring = """ Returns ------- - str (or unicode, depending on data and options) - String representation of the dataframe. + str or None + If buf is None, returns the result as a string. Otherwise returns + None. """ @@ -471,6 +472,10 @@ def _get_formatter(self, i: Union[str, int]) -> Optional[Callable]: def get_buffer( self, buf: Optional[FilePathOrBuffer[str]], encoding: Optional[str] = None ): + """ + Context manager to open, yield and close buffer for filenames or Path-like + objects, otherwise yield buf unchanged. + """ if buf is not None: buf = _stringify_path(buf) else: @@ -488,6 +493,9 @@ def get_buffer( raise TypeError("buf is not a file name and it has no write method") def write_result(self, buf: IO[str]) -> None: + """ + Write the result of serialization to buf. + """ raise AbstractMethodError(self) def get_result( @@ -495,6 +503,9 @@ def get_result( buf: Optional[FilePathOrBuffer[str]] = None, encoding: Optional[str] = None, ) -> Optional[str]: + """ + Perform serialization. Write to buf or return as string if buf is None. + """ with self.get_buffer(buf, encoding=encoding) as f: self.write_result(buf=f) if buf is None:
xref https://github.com/pandas-dev/pandas/pull/27598#discussion_r310128723
https://api.github.com/repos/pandas-dev/pandas/pulls/27738
2019-08-04T11:08:54Z
2019-08-04T21:28:23Z
2019-08-04T21:28:23Z
2019-08-05T09:35:04Z
REF: pandas/core/window.py into multiple files
diff --git a/doc/source/reference/window.rst b/doc/source/reference/window.rst index 9e1374a3bd8e4..2f6addf607877 100644 --- a/doc/source/reference/window.rst +++ b/doc/source/reference/window.rst @@ -5,7 +5,6 @@ ====== Window ====== -.. currentmodule:: pandas.core.window Rolling objects are returned by ``.rolling`` calls: :func:`pandas.DataFrame.rolling`, :func:`pandas.Series.rolling`, etc. Expanding objects are returned by ``.expanding`` calls: :func:`pandas.DataFrame.expanding`, :func:`pandas.Series.expanding`, etc. @@ -13,6 +12,8 @@ EWM objects are returned by ``.ewm`` calls: :func:`pandas.DataFrame.ewm`, :func: Standard moving window functions -------------------------------- +.. currentmodule:: pandas.core.window.rolling + .. autosummary:: :toctree: api/ @@ -38,6 +39,8 @@ Standard moving window functions Standard expanding window functions ----------------------------------- +.. currentmodule:: pandas.core.window.expanding + .. autosummary:: :toctree: api/ @@ -59,6 +62,8 @@ Standard expanding window functions Exponentially-weighted moving window functions ---------------------------------------------- +.. currentmodule:: pandas.core.window.ewm + .. autosummary:: :toctree: api/ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1b39f9225a0ed..4d29f19cc01ed 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10683,9 +10683,9 @@ def _add_series_or_dataframe_operations(cls): the doc strings again. 
""" - from pandas.core import window as rwindow + from pandas.core.window import EWM, Expanding, Rolling, Window - @Appender(rwindow.rolling.__doc__) + @Appender(Rolling.__doc__) def rolling( self, window, @@ -10697,7 +10697,20 @@ def rolling( closed=None, ): axis = self._get_axis_number(axis) - return rwindow.rolling( + + if win_type is not None: + return Window( + self, + window=window, + min_periods=min_periods, + center=center, + win_type=win_type, + on=on, + axis=axis, + closed=closed, + ) + + return Rolling( self, window=window, min_periods=min_periods, @@ -10710,16 +10723,14 @@ def rolling( cls.rolling = rolling - @Appender(rwindow.expanding.__doc__) + @Appender(Expanding.__doc__) def expanding(self, min_periods=1, center=False, axis=0): axis = self._get_axis_number(axis) - return rwindow.expanding( - self, min_periods=min_periods, center=center, axis=axis - ) + return Expanding(self, min_periods=min_periods, center=center, axis=axis) cls.expanding = expanding - @Appender(rwindow.ewm.__doc__) + @Appender(EWM.__doc__) def ewm( self, com=None, @@ -10732,7 +10743,7 @@ def ewm( axis=0, ): axis = self._get_axis_number(axis) - return rwindow.ewm( + return EWM( self, com=com, span=span, diff --git a/pandas/core/window/__init__.py b/pandas/core/window/__init__.py new file mode 100644 index 0000000000000..dcf58a4c0dd5b --- /dev/null +++ b/pandas/core/window/__init__.py @@ -0,0 +1,3 @@ +from pandas.core.window.ewm import EWM # noqa:F401 +from pandas.core.window.expanding import Expanding, ExpandingGroupby # noqa:F401 +from pandas.core.window.rolling import Rolling, RollingGroupby, Window # noqa:F401 diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py new file mode 100644 index 0000000000000..0f2920b3558c9 --- /dev/null +++ b/pandas/core/window/common.py @@ -0,0 +1,276 @@ +"""Common utility functions for rolling operations""" +from collections import defaultdict +import warnings + +import numpy as np + +from pandas.core.dtypes.common import 
is_integer +from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries + +import pandas.core.common as com +from pandas.core.generic import _shared_docs +from pandas.core.groupby.base import GroupByMixin +from pandas.core.index import MultiIndex + +_shared_docs = dict(**_shared_docs) +_doc_template = """ + Returns + ------- + Series or DataFrame + Return type is determined by the caller. + + See Also + -------- + Series.%(name)s : Series %(name)s. + DataFrame.%(name)s : DataFrame %(name)s. +""" + + +class _GroupByMixin(GroupByMixin): + """ + Provide the groupby facilities. + """ + + def __init__(self, obj, *args, **kwargs): + parent = kwargs.pop("parent", None) # noqa + groupby = kwargs.pop("groupby", None) + if groupby is None: + groupby, obj = obj, obj.obj + self._groupby = groupby + self._groupby.mutated = True + self._groupby.grouper.mutated = True + super().__init__(obj, *args, **kwargs) + + count = GroupByMixin._dispatch("count") + corr = GroupByMixin._dispatch("corr", other=None, pairwise=None) + cov = GroupByMixin._dispatch("cov", other=None, pairwise=None) + + def _apply( + self, func, name=None, window=None, center=None, check_minp=None, **kwargs + ): + """ + Dispatch to apply; we are stripping all of the _apply kwargs and + performing the original function call on the grouped object. 
+ """ + + def f(x, name=name, *args): + x = self._shallow_copy(x) + + if isinstance(name, str): + return getattr(x, name)(*args, **kwargs) + + return x.apply(name, *args, **kwargs) + + return self._groupby.apply(f) + + +def _flex_binary_moment(arg1, arg2, f, pairwise=False): + + if not ( + isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) + and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame)) + ): + raise TypeError( + "arguments to moment function must be of type " + "np.ndarray/Series/DataFrame" + ) + + if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance( + arg2, (np.ndarray, ABCSeries) + ): + X, Y = _prep_binary(arg1, arg2) + return f(X, Y) + + elif isinstance(arg1, ABCDataFrame): + from pandas import DataFrame + + def dataframe_from_int_dict(data, frame_template): + result = DataFrame(data, index=frame_template.index) + if len(result.columns) > 0: + result.columns = frame_template.columns[result.columns] + return result + + results = {} + if isinstance(arg2, ABCDataFrame): + if pairwise is False: + if arg1 is arg2: + # special case in order to handle duplicate column names + for i, col in enumerate(arg1.columns): + results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) + return dataframe_from_int_dict(results, arg1) + else: + if not arg1.columns.is_unique: + raise ValueError("'arg1' columns are not unique") + if not arg2.columns.is_unique: + raise ValueError("'arg2' columns are not unique") + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore", RuntimeWarning) + X, Y = arg1.align(arg2, join="outer") + X = X + 0 * Y + Y = Y + 0 * X + + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore", RuntimeWarning) + res_columns = arg1.columns.union(arg2.columns) + for col in res_columns: + if col in X and col in Y: + results[col] = f(X[col], Y[col]) + return DataFrame(results, index=X.index, columns=res_columns) + elif pairwise is True: + results = defaultdict(dict) + for i, k1 in enumerate(arg1.columns): + for 
j, k2 in enumerate(arg2.columns): + if j < i and arg2 is arg1: + # Symmetric case + results[i][j] = results[j][i] + else: + results[i][j] = f( + *_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]) + ) + + from pandas import concat + + result_index = arg1.index.union(arg2.index) + if len(result_index): + + # construct result frame + result = concat( + [ + concat( + [results[i][j] for j, c in enumerate(arg2.columns)], + ignore_index=True, + ) + for i, c in enumerate(arg1.columns) + ], + ignore_index=True, + axis=1, + ) + result.columns = arg1.columns + + # set the index and reorder + if arg2.columns.nlevels > 1: + result.index = MultiIndex.from_product( + arg2.columns.levels + [result_index] + ) + result = result.reorder_levels([2, 0, 1]).sort_index() + else: + result.index = MultiIndex.from_product( + [range(len(arg2.columns)), range(len(result_index))] + ) + result = result.swaplevel(1, 0).sort_index() + result.index = MultiIndex.from_product( + [result_index] + [arg2.columns] + ) + else: + + # empty result + result = DataFrame( + index=MultiIndex( + levels=[arg1.index, arg2.columns], codes=[[], []] + ), + columns=arg2.columns, + dtype="float64", + ) + + # reset our index names to arg1 names + # reset our column names to arg2 names + # careful not to mutate the original names + result.columns = result.columns.set_names(arg1.columns.names) + result.index = result.index.set_names( + result_index.names + arg2.columns.names + ) + + return result + + else: + raise ValueError("'pairwise' is not True/False") + else: + results = { + i: f(*_prep_binary(arg1.iloc[:, i], arg2)) + for i, col in enumerate(arg1.columns) + } + return dataframe_from_int_dict(results, arg1) + + else: + return _flex_binary_moment(arg2, arg1, f) + + +def _get_center_of_mass(comass, span, halflife, alpha): + valid_count = com.count_not_none(comass, span, halflife, alpha) + if valid_count > 1: + raise ValueError("comass, span, halflife, and alpha are mutually exclusive") + + # Convert to center of mass; 
domain checks ensure 0 < alpha <= 1 + if comass is not None: + if comass < 0: + raise ValueError("comass must satisfy: comass >= 0") + elif span is not None: + if span < 1: + raise ValueError("span must satisfy: span >= 1") + comass = (span - 1) / 2.0 + elif halflife is not None: + if halflife <= 0: + raise ValueError("halflife must satisfy: halflife > 0") + decay = 1 - np.exp(np.log(0.5) / halflife) + comass = 1 / decay - 1 + elif alpha is not None: + if alpha <= 0 or alpha > 1: + raise ValueError("alpha must satisfy: 0 < alpha <= 1") + comass = (1.0 - alpha) / alpha + else: + raise ValueError("Must pass one of comass, span, halflife, or alpha") + + return float(comass) + + +def _offset(window, center): + if not is_integer(window): + window = len(window) + offset = (window - 1) / 2.0 if center else 0 + try: + return int(offset) + except TypeError: + return offset.astype(int) + + +def _require_min_periods(p): + def _check_func(minp, window): + if minp is None: + return window + else: + return max(p, minp) + + return _check_func + + +def _use_window(minp, window): + if minp is None: + return window + else: + return minp + + +def _zsqrt(x): + with np.errstate(all="ignore"): + result = np.sqrt(x) + mask = x < 0 + + if isinstance(x, ABCDataFrame): + if mask.values.any(): + result[mask] = 0 + else: + if mask.any(): + result[mask] = 0 + + return result + + +def _prep_binary(arg1, arg2): + if not isinstance(arg2, type(arg1)): + raise Exception("Input arrays must be of the same type!") + + # mask out values, this also makes a common index... 
+ X = arg1 + 0 * arg2 + Y = arg2 + 0 * arg1 + + return X, Y diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py new file mode 100644 index 0000000000000..0ce6d5ddec2ad --- /dev/null +++ b/pandas/core/window/ewm.py @@ -0,0 +1,388 @@ +from textwrap import dedent + +import numpy as np + +import pandas._libs.window as libwindow +from pandas.compat.numpy import function as nv +from pandas.util._decorators import Appender, Substitution + +from pandas.core.dtypes.generic import ABCDataFrame + +from pandas.core.base import DataError +from pandas.core.window.common import _doc_template, _get_center_of_mass, _shared_docs +from pandas.core.window.rolling import _flex_binary_moment, _Rolling, _zsqrt + +_bias_template = """ + Parameters + ---------- + bias : bool, default False + Use a standard estimation bias correction. + *args, **kwargs + Arguments and keyword arguments to be passed into func. +""" + +_pairwise_template = """ + Parameters + ---------- + other : Series, DataFrame, or ndarray, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndex DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + bias : bool, default False + Use a standard estimation bias correction. + **kwargs + Keyword arguments to be passed into func. +""" + + +class EWM(_Rolling): + r""" + Provide exponential weighted functions. + + Parameters + ---------- + com : float, optional + Specify decay in terms of center of mass, + :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`. + span : float, optional + Specify decay in terms of span, + :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`. 
+ halflife : float, optional + Specify decay in terms of half-life, + :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{for} halflife > 0`. + alpha : float, optional + Specify smoothing factor :math:`\alpha` directly, + :math:`0 < \alpha \leq 1`. + min_periods : int, default 0 + Minimum number of observations in window required to have a value + (otherwise result is NA). + adjust : bool, default True + Divide by decaying adjustment factor in beginning periods to account + for imbalance in relative weightings + (viewing EWMA as a moving average). + ignore_na : bool, default False + Ignore missing values when calculating weights; + specify True to reproduce pre-0.15.0 behavior. + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to use. The value 0 identifies the rows, and 1 + identifies the columns. + + Returns + ------- + DataFrame + A Window sub-classed for the particular operation. + + See Also + -------- + rolling : Provides rolling window calculations. + expanding : Provides expanding transformations. + + Notes + ----- + Exactly one of center of mass, span, half-life, and alpha must be provided. + Allowed values and relationship between the parameters are specified in the + parameter descriptions above; see the link at the end of this section for + a detailed explanation. + + When adjust is True (default), weighted averages are calculated using + weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1. + + When adjust is False, weighted averages are calculated recursively as: + weighted_average[0] = arg[0]; + weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i]. + + When ignore_na is False (default), weights are based on absolute positions. + For example, the weights of x and y used in calculating the final weighted + average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and + (1-alpha)**2 and alpha (if adjust is False). 
+ + When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based + on relative positions. For example, the weights of x and y used in + calculating the final weighted average of [x, None, y] are 1-alpha and 1 + (if adjust is True), and 1-alpha and alpha (if adjust is False). + + More details can be found at + http://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows + + Examples + -------- + + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + >>> df.ewm(com=0.5).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + """ + _attributes = ["com", "min_periods", "adjust", "ignore_na", "axis"] + + def __init__( + self, + obj, + com=None, + span=None, + halflife=None, + alpha=None, + min_periods=0, + adjust=True, + ignore_na=False, + axis=0, + ): + self.obj = obj + self.com = _get_center_of_mass(com, span, halflife, alpha) + self.min_periods = min_periods + self.adjust = adjust + self.ignore_na = ignore_na + self.axis = axis + self.on = None + + @property + def _constructor(self): + return EWM + + _agg_see_also_doc = dedent( + """ + See Also + -------- + pandas.DataFrame.rolling.aggregate + """ + ) + + _agg_examples_doc = dedent( + """ + Examples + -------- + + >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) + >>> df + A B C + 0 -2.385977 -0.102758 0.438822 + 1 -1.004295 0.905829 -0.954544 + 2 0.735167 -0.165272 -1.619346 + 3 -0.702657 -1.340923 -0.706334 + 4 -0.246845 0.211596 -0.901819 + 5 2.463718 3.157577 -1.380906 + 6 -1.142255 2.340594 -0.039875 + 7 1.396598 -1.647453 1.677227 + 8 -0.543425 1.761277 -0.220481 + 9 -0.640505 0.289374 -1.550670 + + >>> df.ewm(alpha=0.5).mean() + A B C + 0 -2.385977 -0.102758 0.438822 + 1 -1.464856 0.569633 -0.490089 + 2 -0.207700 0.149687 -1.135379 + 3 -0.471677 -0.645305 -0.906555 + 4 -0.355635 -0.203033 -0.904111 + 5 1.076417 1.503943 -1.146293 + 6 -0.041654 1.925562 
-0.588728 + 7 0.680292 0.132049 0.548693 + 8 0.067236 0.948257 0.163353 + 9 -0.286980 0.618493 -0.694496 + """ + ) + + @Substitution( + see_also=_agg_see_also_doc, + examples=_agg_examples_doc, + versionadded="", + klass="Series/Dataframe", + axis="", + ) + @Appender(_shared_docs["aggregate"]) + def aggregate(self, arg, *args, **kwargs): + return super().aggregate(arg, *args, **kwargs) + + agg = aggregate + + def _apply(self, func, **kwargs): + """ + Rolling statistical measure using supplied function. Designed to be + used with passed-in Cython array-based functions. + + Parameters + ---------- + func : str/callable to apply + + Returns + ------- + y : same type as input argument + """ + blocks, obj = self._create_blocks() + block_list = list(blocks) + + results = [] + exclude = [] + for i, b in enumerate(blocks): + try: + values = self._prep_values(b.values) + + except (TypeError, NotImplementedError): + if isinstance(obj, ABCDataFrame): + exclude.extend(b.columns) + del block_list[i] + continue + else: + raise DataError("No numeric types to aggregate") + + if values.size == 0: + results.append(values.copy()) + continue + + # if we have a string function name, wrap it + if isinstance(func, str): + cfunc = getattr(libwindow, func, None) + if cfunc is None: + raise ValueError( + "we do not support this function " + "in libwindow.{func}".format(func=func) + ) + + def func(arg): + return cfunc( + arg, + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + ) + + results.append(np.apply_along_axis(func, self.axis, values)) + + return self._wrap_results(results, block_list, obj, exclude) + + @Substitution(name="ewm") + @Appender(_doc_template) + def mean(self, *args, **kwargs): + """ + Exponential weighted moving average. + + Parameters + ---------- + *args, **kwargs + Arguments and keyword arguments to be passed into func. 
+ """ + nv.validate_window_func("mean", args, kwargs) + return self._apply("ewma", **kwargs) + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_bias_template) + def std(self, bias=False, *args, **kwargs): + """ + Exponential weighted moving stddev. + """ + nv.validate_window_func("std", args, kwargs) + return _zsqrt(self.var(bias=bias, **kwargs)) + + vol = std + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_bias_template) + def var(self, bias=False, *args, **kwargs): + """ + Exponential weighted moving variance. + """ + nv.validate_window_func("var", args, kwargs) + + def f(arg): + return libwindow.ewmcov( + arg, + arg, + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + int(bias), + ) + + return self._apply(f, **kwargs) + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_pairwise_template) + def cov(self, other=None, pairwise=None, bias=False, **kwargs): + """ + Exponential weighted sample covariance. + """ + if other is None: + other = self._selected_obj + # only default unset + pairwise = True if pairwise is None else pairwise + other = self._shallow_copy(other) + + def _get_cov(X, Y): + X = self._shallow_copy(X) + Y = self._shallow_copy(Y) + cov = libwindow.ewmcov( + X._prep_values(), + Y._prep_values(), + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + int(bias), + ) + return X._wrap_result(cov) + + return _flex_binary_moment( + self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise) + ) + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_pairwise_template) + def corr(self, other=None, pairwise=None, **kwargs): + """ + Exponential weighted sample correlation. 
+ """ + if other is None: + other = self._selected_obj + # only default unset + pairwise = True if pairwise is None else pairwise + other = self._shallow_copy(other) + + def _get_corr(X, Y): + X = self._shallow_copy(X) + Y = self._shallow_copy(Y) + + def _cov(x, y): + return libwindow.ewmcov( + x, + y, + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + 1, + ) + + x_values = X._prep_values() + y_values = Y._prep_values() + with np.errstate(all="ignore"): + cov = _cov(x_values, y_values) + x_var = _cov(x_values, x_values) + y_var = _cov(y_values, y_values) + corr = cov / _zsqrt(x_var * y_var) + return X._wrap_result(corr) + + return _flex_binary_moment( + self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise) + ) diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py new file mode 100644 index 0000000000000..c43ca6b0565f3 --- /dev/null +++ b/pandas/core/window/expanding.py @@ -0,0 +1,260 @@ +from textwrap import dedent + +from pandas.compat.numpy import function as nv +from pandas.util._decorators import Appender, Substitution + +from pandas.core.window.common import _doc_template, _GroupByMixin, _shared_docs +from pandas.core.window.rolling import _Rolling_and_Expanding + + +class Expanding(_Rolling_and_Expanding): + """ + Provide expanding transformations. + + Parameters + ---------- + min_periods : int, default 1 + Minimum number of observations in window required to have a value + (otherwise result is NA). + center : bool, default False + Set the labels at the center of the window. + axis : int or str, default 0 + + Returns + ------- + a Window sub-classed for the particular operation + + See Also + -------- + rolling : Provides rolling window calculations. + ewm : Provides exponential weighted functions. + + Notes + ----- + By default, the result is set to the right edge of the window. This can be + changed to the center of the window by setting ``center=True``. 
+ + Examples + -------- + + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + >>> df.expanding(2).sum() + B + 0 NaN + 1 1.0 + 2 3.0 + 3 3.0 + 4 7.0 + """ + + _attributes = ["min_periods", "center", "axis"] + + def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs): + super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis) + + @property + def _constructor(self): + return Expanding + + def _get_window(self, other=None, **kwargs): + """ + Get the window length over which to perform some operation. + + Parameters + ---------- + other : object, default None + The other object that is involved in the operation. + Such an object is involved for operations like covariance. + + Returns + ------- + window : int + The window length. + """ + axis = self.obj._get_axis(self.axis) + length = len(axis) + (other is not None) * len(axis) + + other = self.min_periods or -1 + return max(length, other) + + _agg_see_also_doc = dedent( + """ + See Also + -------- + DataFrame.expanding.aggregate + DataFrame.rolling.aggregate + DataFrame.aggregate + """ + ) + + _agg_examples_doc = dedent( + """ + Examples + -------- + + >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) + >>> df + A B C + 0 -2.385977 -0.102758 0.438822 + 1 -1.004295 0.905829 -0.954544 + 2 0.735167 -0.165272 -1.619346 + 3 -0.702657 -1.340923 -0.706334 + 4 -0.246845 0.211596 -0.901819 + 5 2.463718 3.157577 -1.380906 + 6 -1.142255 2.340594 -0.039875 + 7 1.396598 -1.647453 1.677227 + 8 -0.543425 1.761277 -0.220481 + 9 -0.640505 0.289374 -1.550670 + + >>> df.ewm(alpha=0.5).mean() + A B C + 0 -2.385977 -0.102758 0.438822 + 1 -1.464856 0.569633 -0.490089 + 2 -0.207700 0.149687 -1.135379 + 3 -0.471677 -0.645305 -0.906555 + 4 -0.355635 -0.203033 -0.904111 + 5 1.076417 1.503943 -1.146293 + 6 -0.041654 1.925562 -0.588728 + 7 0.680292 0.132049 0.548693 + 8 0.067236 0.948257 0.163353 + 9 -0.286980 0.618493 -0.694496 + """ + ) + + 
@Substitution( + see_also=_agg_see_also_doc, + examples=_agg_examples_doc, + versionadded="", + klass="Series/Dataframe", + axis="", + ) + @Appender(_shared_docs["aggregate"]) + def aggregate(self, arg, *args, **kwargs): + return super().aggregate(arg, *args, **kwargs) + + agg = aggregate + + @Substitution(name="expanding") + @Appender(_shared_docs["count"]) + def count(self, **kwargs): + return super().count(**kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["apply"]) + def apply(self, func, raw=None, args=(), kwargs={}): + return super().apply(func, raw=raw, args=args, kwargs=kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["sum"]) + def sum(self, *args, **kwargs): + nv.validate_expanding_func("sum", args, kwargs) + return super().sum(*args, **kwargs) + + @Substitution(name="expanding") + @Appender(_doc_template) + @Appender(_shared_docs["max"]) + def max(self, *args, **kwargs): + nv.validate_expanding_func("max", args, kwargs) + return super().max(*args, **kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["min"]) + def min(self, *args, **kwargs): + nv.validate_expanding_func("min", args, kwargs) + return super().min(*args, **kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["mean"]) + def mean(self, *args, **kwargs): + nv.validate_expanding_func("mean", args, kwargs) + return super().mean(*args, **kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["median"]) + def median(self, **kwargs): + return super().median(**kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["std"]) + def std(self, ddof=1, *args, **kwargs): + nv.validate_expanding_func("std", args, kwargs) + return super().std(ddof=ddof, **kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["var"]) + def var(self, ddof=1, *args, **kwargs): + nv.validate_expanding_func("var", args, kwargs) + return super().var(ddof=ddof, **kwargs) + + @Substitution(name="expanding") + 
@Appender(_doc_template) + @Appender(_shared_docs["skew"]) + def skew(self, **kwargs): + return super().skew(**kwargs) + + _agg_doc = dedent( + """ + Examples + -------- + + The example below will show an expanding calculation with a window size of + four matching the equivalent function call using `scipy.stats`. + + >>> arr = [1, 2, 3, 4, 999] + >>> import scipy.stats + >>> fmt = "{0:.6f}" # limit the printed precision to 6 digits + >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False))) + -1.200000 + >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False))) + 4.999874 + >>> s = pd.Series(arr) + >>> s.expanding(4).kurt() + 0 NaN + 1 NaN + 2 NaN + 3 -1.200000 + 4 4.999874 + dtype: float64 + """ + ) + + @Appender(_agg_doc) + @Substitution(name="expanding") + @Appender(_shared_docs["kurt"]) + def kurt(self, **kwargs): + return super().kurt(**kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["quantile"]) + def quantile(self, quantile, interpolation="linear", **kwargs): + return super().quantile( + quantile=quantile, interpolation=interpolation, **kwargs + ) + + @Substitution(name="expanding") + @Appender(_doc_template) + @Appender(_shared_docs["cov"]) + def cov(self, other=None, pairwise=None, ddof=1, **kwargs): + return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["corr"]) + def corr(self, other=None, pairwise=None, **kwargs): + return super().corr(other=other, pairwise=pairwise, **kwargs) + + +class ExpandingGroupby(_GroupByMixin, Expanding): + """ + Provide a expanding groupby implementation. 
+ """ + + @property + def _constructor(self): + return Expanding diff --git a/pandas/core/window.py b/pandas/core/window/rolling.py similarity index 66% rename from pandas/core/window.py rename to pandas/core/window/rolling.py index 3e3f17369db7b..323089b3fdf6b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window/rolling.py @@ -2,7 +2,6 @@ Provide a generic structure to support window functions, similar to how we have a Groupby object. """ -from collections import defaultdict from datetime import timedelta from textwrap import dedent from typing import Callable, List, Optional, Set, Union @@ -38,22 +37,17 @@ from pandas._typing import Axis, FrameOrSeries, Scalar from pandas.core.base import DataError, PandasObject, SelectionMixin import pandas.core.common as com -from pandas.core.generic import _shared_docs -from pandas.core.groupby.base import GroupByMixin -from pandas.core.index import Index, MultiIndex, ensure_index - -_shared_docs = dict(**_shared_docs) -_doc_template = """ - Returns - ------- - Series or DataFrame - Return type is determined by the caller. - - See Also - -------- - Series.%(name)s : Series %(name)s. - DataFrame.%(name)s : DataFrame %(name)s. -""" +from pandas.core.index import Index, ensure_index +from pandas.core.window.common import ( + _doc_template, + _flex_binary_moment, + _GroupByMixin, + _offset, + _require_min_periods, + _shared_docs, + _use_window, + _zsqrt, +) class _Window(PandasObject, SelectionMixin): @@ -121,6 +115,8 @@ def validate(self): "neither", ]: raise ValueError("closed must be 'right', 'left', 'both' or 'neither'") + if not isinstance(self.obj, (ABCSeries, ABCDataFrame)): + raise TypeError("invalid type: {}".format(type(self))) def _create_blocks(self): """ @@ -929,44 +925,6 @@ def mean(self, *args, **kwargs): return self._apply("roll_weighted_mean", **kwargs) -class _GroupByMixin(GroupByMixin): - """ - Provide the groupby facilities. 
- """ - - def __init__(self, obj, *args, **kwargs): - parent = kwargs.pop("parent", None) # noqa - groupby = kwargs.pop("groupby", None) - if groupby is None: - groupby, obj = obj, obj.obj - self._groupby = groupby - self._groupby.mutated = True - self._groupby.grouper.mutated = True - super().__init__(obj, *args, **kwargs) - - count = GroupByMixin._dispatch("count") - corr = GroupByMixin._dispatch("corr", other=None, pairwise=None) - cov = GroupByMixin._dispatch("cov", other=None, pairwise=None) - - def _apply( - self, func, name=None, window=None, center=None, check_minp=None, **kwargs - ): - """ - Dispatch to apply; we are stripping all of the _apply kwargs and - performing the original function call on the grouped object. - """ - - def f(x, name=name, *args): - x = self._shallow_copy(x) - - if isinstance(name, str): - return getattr(x, name)(*args, **kwargs) - - return x.apply(name, *args, **kwargs) - - return self._groupby.apply(f) - - class _Rolling(_Window): @property def _constructor(self): @@ -1949,6 +1907,9 @@ def corr(self, other=None, pairwise=None, **kwargs): return super().corr(other=other, pairwise=pairwise, **kwargs) +Rolling.__doc__ = Window.__doc__ + + class RollingGroupby(_GroupByMixin, Rolling): """ Provide a rolling groupby implementation. @@ -1976,883 +1937,3 @@ def _validate_monotonic(self): level. """ pass - - -class Expanding(_Rolling_and_Expanding): - """ - Provide expanding transformations. - - Parameters - ---------- - min_periods : int, default 1 - Minimum number of observations in window required to have a value - (otherwise result is NA). - center : bool, default False - Set the labels at the center of the window. - axis : int or str, default 0 - - Returns - ------- - a Window sub-classed for the particular operation - - See Also - -------- - rolling : Provides rolling window calculations. - ewm : Provides exponential weighted functions. - - Notes - ----- - By default, the result is set to the right edge of the window. 
This can be - changed to the center of the window by setting ``center=True``. - - Examples - -------- - - >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) - B - 0 0.0 - 1 1.0 - 2 2.0 - 3 NaN - 4 4.0 - - >>> df.expanding(2).sum() - B - 0 NaN - 1 1.0 - 2 3.0 - 3 3.0 - 4 7.0 - """ - - _attributes = ["min_periods", "center", "axis"] - - def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs): - super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis) - - @property - def _constructor(self): - return Expanding - - def _get_window(self, other=None, **kwargs): - """ - Get the window length over which to perform some operation. - - Parameters - ---------- - other : object, default None - The other object that is involved in the operation. - Such an object is involved for operations like covariance. - - Returns - ------- - window : int - The window length. - """ - axis = self.obj._get_axis(self.axis) - length = len(axis) + (other is not None) * len(axis) - - other = self.min_periods or -1 - return max(length, other) - - _agg_see_also_doc = dedent( - """ - See Also - -------- - DataFrame.expanding.aggregate - DataFrame.rolling.aggregate - DataFrame.aggregate - """ - ) - - _agg_examples_doc = dedent( - """ - Examples - -------- - - >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) - >>> df - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.004295 0.905829 -0.954544 - 2 0.735167 -0.165272 -1.619346 - 3 -0.702657 -1.340923 -0.706334 - 4 -0.246845 0.211596 -0.901819 - 5 2.463718 3.157577 -1.380906 - 6 -1.142255 2.340594 -0.039875 - 7 1.396598 -1.647453 1.677227 - 8 -0.543425 1.761277 -0.220481 - 9 -0.640505 0.289374 -1.550670 - - >>> df.ewm(alpha=0.5).mean() - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.464856 0.569633 -0.490089 - 2 -0.207700 0.149687 -1.135379 - 3 -0.471677 -0.645305 -0.906555 - 4 -0.355635 -0.203033 -0.904111 - 5 1.076417 1.503943 -1.146293 - 6 -0.041654 1.925562 -0.588728 - 7 0.680292 0.132049 0.548693 - 
8 0.067236 0.948257 0.163353 - 9 -0.286980 0.618493 -0.694496 - """ - ) - - @Substitution( - see_also=_agg_see_also_doc, - examples=_agg_examples_doc, - versionadded="", - klass="Series/Dataframe", - axis="", - ) - @Appender(_shared_docs["aggregate"]) - def aggregate(self, arg, *args, **kwargs): - return super().aggregate(arg, *args, **kwargs) - - agg = aggregate - - @Substitution(name="expanding") - @Appender(_shared_docs["count"]) - def count(self, **kwargs): - return super().count(**kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["apply"]) - def apply(self, func, raw=None, args=(), kwargs={}): - return super().apply(func, raw=raw, args=args, kwargs=kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["sum"]) - def sum(self, *args, **kwargs): - nv.validate_expanding_func("sum", args, kwargs) - return super().sum(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_doc_template) - @Appender(_shared_docs["max"]) - def max(self, *args, **kwargs): - nv.validate_expanding_func("max", args, kwargs) - return super().max(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["min"]) - def min(self, *args, **kwargs): - nv.validate_expanding_func("min", args, kwargs) - return super().min(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["mean"]) - def mean(self, *args, **kwargs): - nv.validate_expanding_func("mean", args, kwargs) - return super().mean(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["median"]) - def median(self, **kwargs): - return super().median(**kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["std"]) - def std(self, ddof=1, *args, **kwargs): - nv.validate_expanding_func("std", args, kwargs) - return super().std(ddof=ddof, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["var"]) - def var(self, ddof=1, *args, **kwargs): - nv.validate_expanding_func("var", args, kwargs) - return 
super().var(ddof=ddof, **kwargs) - - @Substitution(name="expanding") - @Appender(_doc_template) - @Appender(_shared_docs["skew"]) - def skew(self, **kwargs): - return super().skew(**kwargs) - - _agg_doc = dedent( - """ - Examples - -------- - - The example below will show an expanding calculation with a window size of - four matching the equivalent function call using `scipy.stats`. - - >>> arr = [1, 2, 3, 4, 999] - >>> import scipy.stats - >>> fmt = "{0:.6f}" # limit the printed precision to 6 digits - >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False))) - -1.200000 - >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False))) - 4.999874 - >>> s = pd.Series(arr) - >>> s.expanding(4).kurt() - 0 NaN - 1 NaN - 2 NaN - 3 -1.200000 - 4 4.999874 - dtype: float64 - """ - ) - - @Appender(_agg_doc) - @Substitution(name="expanding") - @Appender(_shared_docs["kurt"]) - def kurt(self, **kwargs): - return super().kurt(**kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["quantile"]) - def quantile(self, quantile, interpolation="linear", **kwargs): - return super().quantile( - quantile=quantile, interpolation=interpolation, **kwargs - ) - - @Substitution(name="expanding") - @Appender(_doc_template) - @Appender(_shared_docs["cov"]) - def cov(self, other=None, pairwise=None, ddof=1, **kwargs): - return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["corr"]) - def corr(self, other=None, pairwise=None, **kwargs): - return super().corr(other=other, pairwise=pairwise, **kwargs) - - -class ExpandingGroupby(_GroupByMixin, Expanding): - """ - Provide a expanding groupby implementation. - """ - - @property - def _constructor(self): - return Expanding - - -_bias_template = """ - Parameters - ---------- - bias : bool, default False - Use a standard estimation bias correction. - *args, **kwargs - Arguments and keyword arguments to be passed into func. 
-""" - -_pairwise_template = """ - Parameters - ---------- - other : Series, DataFrame, or ndarray, optional - If not supplied then will default to self and produce pairwise - output. - pairwise : bool, default None - If False then only matching columns between self and other will be - used and the output will be a DataFrame. - If True then all pairwise combinations will be calculated and the - output will be a MultiIndex DataFrame in the case of DataFrame - inputs. In the case of missing elements, only complete pairwise - observations will be used. - bias : bool, default False - Use a standard estimation bias correction. - **kwargs - Keyword arguments to be passed into func. -""" - - -class EWM(_Rolling): - r""" - Provide exponential weighted functions. - - Parameters - ---------- - com : float, optional - Specify decay in terms of center of mass, - :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`. - span : float, optional - Specify decay in terms of span, - :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`. - halflife : float, optional - Specify decay in terms of half-life, - :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{for} halflife > 0`. - alpha : float, optional - Specify smoothing factor :math:`\alpha` directly, - :math:`0 < \alpha \leq 1`. - min_periods : int, default 0 - Minimum number of observations in window required to have a value - (otherwise result is NA). - adjust : bool, default True - Divide by decaying adjustment factor in beginning periods to account - for imbalance in relative weightings - (viewing EWMA as a moving average). - ignore_na : bool, default False - Ignore missing values when calculating weights; - specify True to reproduce pre-0.15.0 behavior. - axis : {0 or 'index', 1 or 'columns'}, default 0 - The axis to use. The value 0 identifies the rows, and 1 - identifies the columns. - - Returns - ------- - DataFrame - A Window sub-classed for the particular operation. 
- - See Also - -------- - rolling : Provides rolling window calculations. - expanding : Provides expanding transformations. - - Notes - ----- - Exactly one of center of mass, span, half-life, and alpha must be provided. - Allowed values and relationship between the parameters are specified in the - parameter descriptions above; see the link at the end of this section for - a detailed explanation. - - When adjust is True (default), weighted averages are calculated using - weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1. - - When adjust is False, weighted averages are calculated recursively as: - weighted_average[0] = arg[0]; - weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i]. - - When ignore_na is False (default), weights are based on absolute positions. - For example, the weights of x and y used in calculating the final weighted - average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and - (1-alpha)**2 and alpha (if adjust is False). - - When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based - on relative positions. For example, the weights of x and y used in - calculating the final weighted average of [x, None, y] are 1-alpha and 1 - (if adjust is True), and 1-alpha and alpha (if adjust is False). 
- - More details can be found at - http://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows - - Examples - -------- - - >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) - >>> df - B - 0 0.0 - 1 1.0 - 2 2.0 - 3 NaN - 4 4.0 - - >>> df.ewm(com=0.5).mean() - B - 0 0.000000 - 1 0.750000 - 2 1.615385 - 3 1.615385 - 4 3.670213 - """ - _attributes = ["com", "min_periods", "adjust", "ignore_na", "axis"] - - def __init__( - self, - obj, - com=None, - span=None, - halflife=None, - alpha=None, - min_periods=0, - adjust=True, - ignore_na=False, - axis=0, - ): - self.obj = obj - self.com = _get_center_of_mass(com, span, halflife, alpha) - self.min_periods = min_periods - self.adjust = adjust - self.ignore_na = ignore_na - self.axis = axis - self.on = None - - @property - def _constructor(self): - return EWM - - _agg_see_also_doc = dedent( - """ - See Also - -------- - pandas.DataFrame.rolling.aggregate - """ - ) - - _agg_examples_doc = dedent( - """ - Examples - -------- - - >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) - >>> df - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.004295 0.905829 -0.954544 - 2 0.735167 -0.165272 -1.619346 - 3 -0.702657 -1.340923 -0.706334 - 4 -0.246845 0.211596 -0.901819 - 5 2.463718 3.157577 -1.380906 - 6 -1.142255 2.340594 -0.039875 - 7 1.396598 -1.647453 1.677227 - 8 -0.543425 1.761277 -0.220481 - 9 -0.640505 0.289374 -1.550670 - - >>> df.ewm(alpha=0.5).mean() - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.464856 0.569633 -0.490089 - 2 -0.207700 0.149687 -1.135379 - 3 -0.471677 -0.645305 -0.906555 - 4 -0.355635 -0.203033 -0.904111 - 5 1.076417 1.503943 -1.146293 - 6 -0.041654 1.925562 -0.588728 - 7 0.680292 0.132049 0.548693 - 8 0.067236 0.948257 0.163353 - 9 -0.286980 0.618493 -0.694496 - """ - ) - - @Substitution( - see_also=_agg_see_also_doc, - examples=_agg_examples_doc, - versionadded="", - klass="Series/Dataframe", - axis="", - ) - @Appender(_shared_docs["aggregate"]) 
- def aggregate(self, arg, *args, **kwargs): - return super().aggregate(arg, *args, **kwargs) - - agg = aggregate - - def _apply(self, func, **kwargs): - """ - Rolling statistical measure using supplied function. Designed to be - used with passed-in Cython array-based functions. - - Parameters - ---------- - func : str/callable to apply - - Returns - ------- - y : same type as input argument - """ - blocks, obj = self._create_blocks() - block_list = list(blocks) - - results = [] - exclude = [] - for i, b in enumerate(blocks): - try: - values = self._prep_values(b.values) - - except (TypeError, NotImplementedError): - if isinstance(obj, ABCDataFrame): - exclude.extend(b.columns) - del block_list[i] - continue - else: - raise DataError("No numeric types to aggregate") - - if values.size == 0: - results.append(values.copy()) - continue - - # if we have a string function name, wrap it - if isinstance(func, str): - cfunc = getattr(libwindow, func, None) - if cfunc is None: - raise ValueError( - "we do not support this function " - "in libwindow.{func}".format(func=func) - ) - - def func(arg): - return cfunc( - arg, - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - ) - - results.append(np.apply_along_axis(func, self.axis, values)) - - return self._wrap_results(results, block_list, obj, exclude) - - @Substitution(name="ewm") - @Appender(_doc_template) - def mean(self, *args, **kwargs): - """ - Exponential weighted moving average. - - Parameters - ---------- - *args, **kwargs - Arguments and keyword arguments to be passed into func. - """ - nv.validate_window_func("mean", args, kwargs) - return self._apply("ewma", **kwargs) - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_bias_template) - def std(self, bias=False, *args, **kwargs): - """ - Exponential weighted moving stddev. 
- """ - nv.validate_window_func("std", args, kwargs) - return _zsqrt(self.var(bias=bias, **kwargs)) - - vol = std - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_bias_template) - def var(self, bias=False, *args, **kwargs): - """ - Exponential weighted moving variance. - """ - nv.validate_window_func("var", args, kwargs) - - def f(arg): - return libwindow.ewmcov( - arg, - arg, - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - int(bias), - ) - - return self._apply(f, **kwargs) - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_pairwise_template) - def cov(self, other=None, pairwise=None, bias=False, **kwargs): - """ - Exponential weighted sample covariance. - """ - if other is None: - other = self._selected_obj - # only default unset - pairwise = True if pairwise is None else pairwise - other = self._shallow_copy(other) - - def _get_cov(X, Y): - X = self._shallow_copy(X) - Y = self._shallow_copy(Y) - cov = libwindow.ewmcov( - X._prep_values(), - Y._prep_values(), - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - int(bias), - ) - return X._wrap_result(cov) - - return _flex_binary_moment( - self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise) - ) - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_pairwise_template) - def corr(self, other=None, pairwise=None, **kwargs): - """ - Exponential weighted sample correlation. 
- """ - if other is None: - other = self._selected_obj - # only default unset - pairwise = True if pairwise is None else pairwise - other = self._shallow_copy(other) - - def _get_corr(X, Y): - X = self._shallow_copy(X) - Y = self._shallow_copy(Y) - - def _cov(x, y): - return libwindow.ewmcov( - x, - y, - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - 1, - ) - - x_values = X._prep_values() - y_values = Y._prep_values() - with np.errstate(all="ignore"): - cov = _cov(x_values, y_values) - x_var = _cov(x_values, x_values) - y_var = _cov(y_values, y_values) - corr = cov / _zsqrt(x_var * y_var) - return X._wrap_result(corr) - - return _flex_binary_moment( - self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise) - ) - - -# Helper Funcs - - -def _flex_binary_moment(arg1, arg2, f, pairwise=False): - - if not ( - isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) - and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame)) - ): - raise TypeError( - "arguments to moment function must be of type " - "np.ndarray/Series/DataFrame" - ) - - if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance( - arg2, (np.ndarray, ABCSeries) - ): - X, Y = _prep_binary(arg1, arg2) - return f(X, Y) - - elif isinstance(arg1, ABCDataFrame): - from pandas import DataFrame - - def dataframe_from_int_dict(data, frame_template): - result = DataFrame(data, index=frame_template.index) - if len(result.columns) > 0: - result.columns = frame_template.columns[result.columns] - return result - - results = {} - if isinstance(arg2, ABCDataFrame): - if pairwise is False: - if arg1 is arg2: - # special case in order to handle duplicate column names - for i, col in enumerate(arg1.columns): - results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) - return dataframe_from_int_dict(results, arg1) - else: - if not arg1.columns.is_unique: - raise ValueError("'arg1' columns are not unique") - if not arg2.columns.is_unique: - raise ValueError("'arg2' columns are 
not unique") - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - X, Y = arg1.align(arg2, join="outer") - X = X + 0 * Y - Y = Y + 0 * X - - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - res_columns = arg1.columns.union(arg2.columns) - for col in res_columns: - if col in X and col in Y: - results[col] = f(X[col], Y[col]) - return DataFrame(results, index=X.index, columns=res_columns) - elif pairwise is True: - results = defaultdict(dict) - for i, k1 in enumerate(arg1.columns): - for j, k2 in enumerate(arg2.columns): - if j < i and arg2 is arg1: - # Symmetric case - results[i][j] = results[j][i] - else: - results[i][j] = f( - *_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]) - ) - - from pandas import concat - - result_index = arg1.index.union(arg2.index) - if len(result_index): - - # construct result frame - result = concat( - [ - concat( - [results[i][j] for j, c in enumerate(arg2.columns)], - ignore_index=True, - ) - for i, c in enumerate(arg1.columns) - ], - ignore_index=True, - axis=1, - ) - result.columns = arg1.columns - - # set the index and reorder - if arg2.columns.nlevels > 1: - result.index = MultiIndex.from_product( - arg2.columns.levels + [result_index] - ) - result = result.reorder_levels([2, 0, 1]).sort_index() - else: - result.index = MultiIndex.from_product( - [range(len(arg2.columns)), range(len(result_index))] - ) - result = result.swaplevel(1, 0).sort_index() - result.index = MultiIndex.from_product( - [result_index] + [arg2.columns] - ) - else: - - # empty result - result = DataFrame( - index=MultiIndex( - levels=[arg1.index, arg2.columns], codes=[[], []] - ), - columns=arg2.columns, - dtype="float64", - ) - - # reset our index names to arg1 names - # reset our column names to arg2 names - # careful not to mutate the original names - result.columns = result.columns.set_names(arg1.columns.names) - result.index = result.index.set_names( - result_index.names 
+ arg2.columns.names - ) - - return result - - else: - raise ValueError("'pairwise' is not True/False") - else: - results = { - i: f(*_prep_binary(arg1.iloc[:, i], arg2)) - for i, col in enumerate(arg1.columns) - } - return dataframe_from_int_dict(results, arg1) - - else: - return _flex_binary_moment(arg2, arg1, f) - - -def _get_center_of_mass(comass, span, halflife, alpha): - valid_count = com.count_not_none(comass, span, halflife, alpha) - if valid_count > 1: - raise ValueError("comass, span, halflife, and alpha are mutually exclusive") - - # Convert to center of mass; domain checks ensure 0 < alpha <= 1 - if comass is not None: - if comass < 0: - raise ValueError("comass must satisfy: comass >= 0") - elif span is not None: - if span < 1: - raise ValueError("span must satisfy: span >= 1") - comass = (span - 1) / 2.0 - elif halflife is not None: - if halflife <= 0: - raise ValueError("halflife must satisfy: halflife > 0") - decay = 1 - np.exp(np.log(0.5) / halflife) - comass = 1 / decay - 1 - elif alpha is not None: - if alpha <= 0 or alpha > 1: - raise ValueError("alpha must satisfy: 0 < alpha <= 1") - comass = (1.0 - alpha) / alpha - else: - raise ValueError("Must pass one of comass, span, halflife, or alpha") - - return float(comass) - - -def _offset(window, center): - if not is_integer(window): - window = len(window) - offset = (window - 1) / 2.0 if center else 0 - try: - return int(offset) - except TypeError: - return offset.astype(int) - - -def _require_min_periods(p): - def _check_func(minp, window): - if minp is None: - return window - else: - return max(p, minp) - - return _check_func - - -def _use_window(minp, window): - if minp is None: - return window - else: - return minp - - -def _zsqrt(x): - with np.errstate(all="ignore"): - result = np.sqrt(x) - mask = x < 0 - - if isinstance(x, ABCDataFrame): - if mask.values.any(): - result[mask] = 0 - else: - if mask.any(): - result[mask] = 0 - - return result - - -def _prep_binary(arg1, arg2): - if not 
isinstance(arg2, type(arg1)): - raise Exception("Input arrays must be of the same type!") - - # mask out values, this also makes a common index... - X = arg1 + 0 * arg2 - Y = arg2 + 0 * arg1 - - return X, Y - - -# Top-level exports - - -def rolling(obj, win_type=None, **kwds): - if not isinstance(obj, (ABCSeries, ABCDataFrame)): - raise TypeError("invalid type: %s" % type(obj)) - - if win_type is not None: - return Window(obj, win_type=win_type, **kwds) - - return Rolling(obj, **kwds) - - -rolling.__doc__ = Window.__doc__ - - -def expanding(obj, **kwds): - if not isinstance(obj, (ABCSeries, ABCDataFrame)): - raise TypeError("invalid type: %s" % type(obj)) - - return Expanding(obj, **kwds) - - -expanding.__doc__ = Expanding.__doc__ - - -def ewm(obj, **kwds): - if not isinstance(obj, (ABCSeries, ABCDataFrame)): - raise TypeError("invalid type: %s" % type(obj)) - - return EWM(obj, **kwds) - - -ewm.__doc__ = EWM.__doc__ diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py index a05b567adad7a..1683fda500f85 100644 --- a/pandas/tests/window/test_ewm.py +++ b/pandas/tests/window/test_ewm.py @@ -4,7 +4,7 @@ from pandas.errors import UnsupportedFunctionCall from pandas import DataFrame, Series -import pandas.core.window as rwindow +from pandas.core.window import EWM from pandas.tests.window.common import Base @@ -60,7 +60,7 @@ def test_constructor(self, which): @pytest.mark.parametrize("method", ["std", "mean", "var"]) def test_numpy_compat(self, method): # see gh-12811 - e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5) + e = EWM(Series([2, 4, 6]), alpha=0.5) msg = "numpy operations are not valid with window objects" diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py index 1e92c981964c5..098acdff93ac6 100644 --- a/pandas/tests/window/test_expanding.py +++ b/pandas/tests/window/test_expanding.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import DataFrame, Series -import pandas.core.window as rwindow +from 
pandas.core.window import Expanding from pandas.tests.window.common import Base import pandas.util.testing as tm @@ -42,7 +42,7 @@ def test_constructor(self, which): @pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"]) def test_numpy_compat(self, method): # see gh-12811 - e = rwindow.Expanding(Series([2, 4, 6]), window=2) + e = Expanding(Series([2, 4, 6]), window=2) msg = "numpy operations are not valid with window objects" diff --git a/pandas/tests/window/test_moments.py b/pandas/tests/window/test_moments.py index d860859958254..3d6cd7d10bd10 100644 --- a/pandas/tests/window/test_moments.py +++ b/pandas/tests/window/test_moments.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import DataFrame, Index, Series, concat, isna, notna -import pandas.core.window as rwindow +from pandas.core.window.common import _flex_binary_moment from pandas.tests.window.common import Base import pandas.util.testing as tm @@ -1878,7 +1878,7 @@ def test_flex_binary_moment(self): " np.ndarray/Series/DataFrame" ) with pytest.raises(TypeError, match=msg): - rwindow._flex_binary_moment(5, 6, None) + _flex_binary_moment(5, 6, None) def test_corr_sanity(self): # GH 3155 diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index f0787ab3d191f..b4787bf25e3bb 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -8,7 +8,7 @@ import pandas as pd from pandas import DataFrame, Series -import pandas.core.window as rwindow +from pandas.core.window import Rolling from pandas.tests.window.common import Base import pandas.util.testing as tm @@ -101,7 +101,7 @@ def test_constructor_timedelta_window_and_minperiods(self, window, raw): @pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"]) def test_numpy_compat(self, method): # see gh-12811 - r = rwindow.Rolling(Series([2, 4, 6]), window=2) + r = Rolling(Series([2, 4, 6]), window=2) msg = "numpy operations are not valid with 
window objects" diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py index a6a56c98a9377..5692404205012 100644 --- a/pandas/tests/window/test_window.py +++ b/pandas/tests/window/test_window.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import Series -import pandas.core.window as rwindow +from pandas.core.window import Window from pandas.tests.window.common import Base @@ -50,7 +50,7 @@ def test_constructor_with_win_type(self, which, win_types): @pytest.mark.parametrize("method", ["sum", "mean"]) def test_numpy_compat(self, method): # see gh-12811 - w = rwindow.Window(Series([2, 4, 6]), window=[0, 2]) + w = Window(Series([2, 4, 6]), window=[0, 2]) msg = "numpy operations are not valid with window objects"
- [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Splits `pandas/core/window.py` into more logical division of: `pandas/core/window/rolling.py` `pandas/core/window/expanding.py` `pandas/core/window/ewm.py` `pandas/core/window/common.py`
https://api.github.com/repos/pandas-dev/pandas/pulls/27736
2019-08-04T05:33:41Z
2019-08-07T13:32:48Z
2019-08-07T13:32:47Z
2019-08-07T16:53:20Z
REF: separate out invalid ops
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 2747b1d7dd9f1..770870a466aa9 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -44,9 +44,10 @@ from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna from pandas._typing import DatetimeLikeScalar -from pandas.core import missing, nanops, ops +from pandas.core import missing, nanops from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts import pandas.core.common as com +from pandas.core.ops.invalid import make_invalid_op from pandas.tseries import frequencies from pandas.tseries.offsets import DateOffset, Tick @@ -921,18 +922,18 @@ def _is_unique(self): # pow is invalid for all three subclasses; TimedeltaArray will override # the multiplication and division ops - __pow__ = ops.make_invalid_op("__pow__") - __rpow__ = ops.make_invalid_op("__rpow__") - __mul__ = ops.make_invalid_op("__mul__") - __rmul__ = ops.make_invalid_op("__rmul__") - __truediv__ = ops.make_invalid_op("__truediv__") - __rtruediv__ = ops.make_invalid_op("__rtruediv__") - __floordiv__ = ops.make_invalid_op("__floordiv__") - __rfloordiv__ = ops.make_invalid_op("__rfloordiv__") - __mod__ = ops.make_invalid_op("__mod__") - __rmod__ = ops.make_invalid_op("__rmod__") - __divmod__ = ops.make_invalid_op("__divmod__") - __rdivmod__ = ops.make_invalid_op("__rdivmod__") + __pow__ = make_invalid_op("__pow__") + __rpow__ = make_invalid_op("__rpow__") + __mul__ = make_invalid_op("__mul__") + __rmul__ = make_invalid_op("__rmul__") + __truediv__ = make_invalid_op("__truediv__") + __rtruediv__ = make_invalid_op("__rtruediv__") + __floordiv__ = make_invalid_op("__floordiv__") + __rfloordiv__ = make_invalid_op("__rfloordiv__") + __mod__ = make_invalid_op("__mod__") + __rmod__ = make_invalid_op("__rmod__") + __divmod__ = make_invalid_op("__divmod__") + __rdivmod__ = make_invalid_op("__rdivmod__") def _add_datetimelike_scalar(self, other): # 
Overriden by TimedeltaArray diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 061ee4b90d0e9..28537124536e7 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -53,6 +53,7 @@ from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com +from pandas.core.ops.invalid import invalid_comparison from pandas.tseries.frequencies import get_period_alias, to_offset from pandas.tseries.offsets import Day, Tick @@ -171,13 +172,13 @@ def wrapper(self, other): other = _to_M8(other, tz=self.tz) except ValueError: # string that cannot be parsed to Timestamp - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) result = op(self.asi8, other.view("i8")) if isna(other): result.fill(nat_result) elif lib.is_scalar(other) or np.ndim(other) == 0: - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") else: @@ -191,7 +192,7 @@ def wrapper(self, other): ): # Following Timestamp convention, __eq__ is all-False # and __ne__ is all True, others raise TypeError. - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) if is_object_dtype(other): # We have to use _comp_method_OBJECT_ARRAY instead of numpy @@ -204,7 +205,7 @@ def wrapper(self, other): o_mask = isna(other) elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)): # e.g. 
is_timedelta64_dtype(other) - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) else: self._assert_tzawareness_compat(other) if isinstance(other, (ABCIndexClass, ABCSeries)): diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index afd1e8203059e..94dd561fc96f7 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -41,9 +41,9 @@ ) from pandas.core.dtypes.missing import isna -from pandas.core import ops from pandas.core.algorithms import checked_add_with_arr import pandas.core.common as com +from pandas.core.ops.invalid import invalid_comparison from pandas.tseries.frequencies import to_offset from pandas.tseries.offsets import Tick @@ -90,14 +90,14 @@ def wrapper(self, other): other = Timedelta(other) except ValueError: # failed to parse as timedelta - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) result = op(self.view("i8"), other.value) if isna(other): result.fill(nat_result) elif not is_list_like(other): - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") @@ -106,7 +106,7 @@ def wrapper(self, other): try: other = type(self)._from_sequence(other)._data except (ValueError, TypeError): - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) result = op(self.view("i8"), other.view("i8")) result = com.values_from_object(result) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2271ff643bc15..57e84282aed72 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -70,7 +70,8 @@ from pandas.core.indexers import maybe_convert_indices from pandas.core.indexes.frozen import FrozenList import pandas.core.missing as missing -from pandas.core.ops import get_op_result_name, make_invalid_op +from pandas.core.ops import 
get_op_result_name +from pandas.core.ops.invalid import make_invalid_op import pandas.core.sorting as sorting from pandas.core.strings import StringMethods diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 48b3d74e8d02c..4ab1941e3493f 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -49,15 +49,15 @@ import pandas as pd from pandas._typing import ArrayLike from pandas.core.construction import extract_array - -from . import missing -from .docstrings import ( +from pandas.core.ops import missing +from pandas.core.ops.docstrings import ( _arith_doc_FRAME, _flex_comp_doc_FRAME, _make_flex_doc, _op_descriptions, ) -from .roperator import ( # noqa:F401 +from pandas.core.ops.invalid import invalid_comparison +from pandas.core.ops.roperator import ( # noqa:F401 radd, rand_, rdiv, @@ -185,29 +185,6 @@ def maybe_upcast_for_op(obj, shape: Tuple[int, ...]): # ----------------------------------------------------------------------------- -def make_invalid_op(name): - """ - Return a binary method that always raises a TypeError. - - Parameters - ---------- - name : str - - Returns - ------- - invalid_op : function - """ - - def invalid_op(self, other=None): - raise TypeError( - "cannot perform {name} with this index type: " - "{typ}".format(name=name, typ=type(self).__name__) - ) - - invalid_op.__name__ = name - return invalid_op - - def _gen_eval_kwargs(name): """ Find the keyword arguments to pass to numexpr for the given operation. 
@@ -476,38 +453,6 @@ def masked_arith_op(x, y, op): return result -def invalid_comparison(left, right, op): - """ - If a comparison has mismatched types and is not necessarily meaningful, - follow python3 conventions by: - - - returning all-False for equality - - returning all-True for inequality - - raising TypeError otherwise - - Parameters - ---------- - left : array-like - right : scalar, array-like - op : operator.{eq, ne, lt, le, gt} - - Raises - ------ - TypeError : on inequality comparisons - """ - if op is operator.eq: - res_values = np.zeros(left.shape, dtype=bool) - elif op is operator.ne: - res_values = np.ones(left.shape, dtype=bool) - else: - raise TypeError( - "Invalid comparison between dtype={dtype} and {typ}".format( - dtype=left.dtype, typ=type(right).__name__ - ) - ) - return res_values - - # ----------------------------------------------------------------------------- # Dispatch logic diff --git a/pandas/core/ops/invalid.py b/pandas/core/ops/invalid.py new file mode 100644 index 0000000000000..013ff7689b221 --- /dev/null +++ b/pandas/core/ops/invalid.py @@ -0,0 +1,61 @@ +""" +Templates for invalid operations. 
+""" +import operator + +import numpy as np + + +def invalid_comparison(left, right, op): + """ + If a comparison has mismatched types and is not necessarily meaningful, + follow python3 conventions by: + + - returning all-False for equality + - returning all-True for inequality + - raising TypeError otherwise + + Parameters + ---------- + left : array-like + right : scalar, array-like + op : operator.{eq, ne, lt, le, gt} + + Raises + ------ + TypeError : on inequality comparisons + """ + if op is operator.eq: + res_values = np.zeros(left.shape, dtype=bool) + elif op is operator.ne: + res_values = np.ones(left.shape, dtype=bool) + else: + raise TypeError( + "Invalid comparison between dtype={dtype} and {typ}".format( + dtype=left.dtype, typ=type(right).__name__ + ) + ) + return res_values + + +def make_invalid_op(name: str): + """ + Return a binary method that always raises a TypeError. + + Parameters + ---------- + name : str + + Returns + ------- + invalid_op : function + """ + + def invalid_op(self, other=None): + raise TypeError( + "cannot perform {name} with this index type: " + "{typ}".format(name=name, typ=type(self).__name__) + ) + + invalid_op.__name__ = name + return invalid_op
We moved ops.py to `ops.__init__` a while back, still need to get the bulk of it out of `__init__`. This separates out templated invalid operations, which are the main things that outside modules import (this helps us move towards getting rid of the `import pandas as pd` in the main file)
https://api.github.com/repos/pandas-dev/pandas/pulls/27735
2019-08-04T00:35:40Z
2019-08-05T15:52:11Z
2019-08-05T15:52:11Z
2019-08-05T16:38:29Z
TYPING: type hints for io.formats.latex
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index dad099b747701..c60e15b733f0a 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -1,11 +1,13 @@ """ Module for formatting output data in Latex. """ +from typing import IO, List, Optional, Tuple + import numpy as np from pandas.core.dtypes.generic import ABCMultiIndex -from pandas.io.formats.format import TableFormatter +from pandas.io.formats.format import DataFrameFormatter, TableFormatter class LatexFormatter(TableFormatter): @@ -28,12 +30,12 @@ class LatexFormatter(TableFormatter): def __init__( self, - formatter, - column_format=None, - longtable=False, - multicolumn=False, - multicolumn_format=None, - multirow=False, + formatter: DataFrameFormatter, + column_format: Optional[str] = None, + longtable: bool = False, + multicolumn: bool = False, + multicolumn_format: Optional[str] = None, + multirow: bool = False, ): self.fmt = formatter self.frame = self.fmt.frame @@ -44,7 +46,7 @@ def __init__( self.multicolumn_format = multicolumn_format self.multirow = multirow - def write_result(self, buf): + def write_result(self, buf: IO[str]) -> None: """ Render a DataFrame to a LaTeX tabular/longtable environment output. 
""" @@ -124,7 +126,7 @@ def pad_empties(x): if self.fmt.has_index_names and self.fmt.show_index_names: nlevels += 1 strrows = list(zip(*strcols)) - self.clinebuf = [] + self.clinebuf = [] # type: List[List[int]] for i, row in enumerate(strrows): if i == nlevels and self.fmt.header: @@ -186,7 +188,7 @@ def pad_empties(x): else: buf.write("\\end{longtable}\n") - def _format_multicolumn(self, row, ilevels): + def _format_multicolumn(self, row: List[str], ilevels: int) -> List[str]: r""" Combine columns belonging to a group to a single multicolumn entry according to self.multicolumn_format @@ -227,7 +229,9 @@ def append_col(): append_col() return row2 - def _format_multirow(self, row, ilevels, i, rows): + def _format_multirow( + self, row: List[str], ilevels: int, i: int, rows: List[Tuple[str, ...]] + ) -> List[str]: r""" Check following rows, whether row should be a multirow @@ -254,7 +258,7 @@ def _format_multirow(self, row, ilevels, i, rows): self.clinebuf.append([i + nrow - 1, j + 1]) return row - def _print_cline(self, buf, i, icol): + def _print_cline(self, buf: IO[str], i: int, icol: int) -> None: """ Print clines after multirow-blocks are finished """
https://api.github.com/repos/pandas-dev/pandas/pulls/27734
2019-08-03T20:58:05Z
2019-08-04T21:21:56Z
2019-08-04T21:21:56Z
2019-08-05T09:34:26Z
BUG: fix to_datetime(dti, utc=True)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index b5bd83fd17530..51307d6771559 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -31,7 +31,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ - +- Bug in :func:`to_datetime` where passing a timezone-naive :class:`DatetimeArray` or :class:`DatetimeIndex` and ``utc=True`` would incorrectly return a timezone-naive result (:issue:`27733`) - - - diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 172084e97a959..b07647cf5b5fb 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -334,6 +334,9 @@ def _convert_listlike_datetimes( return DatetimeIndex(arg, tz=tz, name=name) except ValueError: pass + elif tz: + # DatetimeArray, DatetimeIndex + return arg.tz_localize(tz) return arg diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 10d422e8aa52c..23540041a3d70 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1623,6 +1623,18 @@ def test_dayfirst(self, cache): tm.assert_index_equal(expected, idx5) tm.assert_index_equal(expected, idx6) + @pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray]) + def test_to_datetime_dta_tz(self, klass): + # GH#27733 + dti = date_range("2015-04-05", periods=3).rename("foo") + expected = dti.tz_localize("UTC") + + obj = klass(dti) + expected = klass(expected) + + result = to_datetime(obj, utc=True) + tm.assert_equal(result, expected) + class TestGuessDatetimeFormat: @td.skip_if_not_us_locale
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry ATM `to_datetime(naive_dti, utc=True)` returns naive incorrect, same for naive `DatetimeArray`
https://api.github.com/repos/pandas-dev/pandas/pulls/27733
2019-08-03T20:26:27Z
2019-08-05T20:29:17Z
2019-08-05T20:29:17Z
2019-08-05T21:09:30Z
REF: define concat classmethods in the appropriate places
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 9c49e91134288..12f3fd2c75dc8 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -20,12 +20,11 @@ is_timedelta64_dtype, ) from pandas.core.dtypes.generic import ( + ABCCategoricalIndex, ABCDatetimeArray, - ABCDatetimeIndex, ABCIndexClass, - ABCPeriodIndex, ABCRangeIndex, - ABCTimedeltaIndex, + ABCSeries, ) @@ -285,14 +284,14 @@ def union_categoricals(to_union, sort_categories=False, ignore_order=False): [b, c, a, b] Categories (3, object): [b, c, a] """ - from pandas import Index, Categorical, CategoricalIndex, Series + from pandas import Index, Categorical from pandas.core.arrays.categorical import _recode_for_categories if len(to_union) == 0: raise ValueError("No Categoricals to union") def _maybe_unwrap(x): - if isinstance(x, (CategoricalIndex, Series)): + if isinstance(x, (ABCCategoricalIndex, ABCSeries)): return x.values elif isinstance(x, Categorical): return x @@ -450,31 +449,6 @@ def _concat_datetimetz(to_concat, name=None): return sample._concat_same_type(to_concat) -def _concat_index_same_dtype(indexes, klass=None): - klass = klass if klass is not None else indexes[0].__class__ - return klass(np.concatenate([x._values for x in indexes])) - - -def _concat_index_asobject(to_concat, name=None): - """ - concat all inputs as object. 
DatetimeIndex, TimedeltaIndex and - PeriodIndex are converted to object dtype before concatenation - """ - from pandas import Index - from pandas.core.arrays import ExtensionArray - - klasses = (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex, ExtensionArray) - to_concat = [x.astype(object) if isinstance(x, klasses) else x for x in to_concat] - - self = to_concat[0] - attribs = self._get_attributes_dict() - attribs["name"] = name - - to_concat = [x._values if isinstance(x, Index) else x for x in to_concat] - - return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs) - - def _concat_sparse(to_concat, axis=0, typs=None): """ provide concatenation of an sparse/dense array of arrays each of which is a @@ -505,52 +479,3 @@ def _concat_sparse(to_concat, axis=0, typs=None): ] return SparseArray._concat_same_type(to_concat) - - -def _concat_rangeindex_same_dtype(indexes): - """ - Concatenates multiple RangeIndex instances. All members of "indexes" must - be of type RangeIndex; result will be RangeIndex if possible, Int64Index - otherwise. 
E.g.: - indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6) - indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5]) - """ - from pandas import Int64Index, RangeIndex - - start = step = next_ = None - - # Filter the empty indexes - non_empty_indexes = [obj for obj in indexes if len(obj)] - - for obj in non_empty_indexes: - rng = obj._range # type: range - - if start is None: - # This is set by the first non-empty index - start = rng.start - if step is None and len(rng) > 1: - step = rng.step - elif step is None: - # First non-empty index had only one element - if rng.start == start: - return _concat_index_same_dtype(indexes, klass=Int64Index) - step = rng.start - start - - non_consecutive = (step != rng.step and len(rng) > 1) or ( - next_ is not None and rng.start != next_ - ) - if non_consecutive: - return _concat_index_same_dtype(indexes, klass=Int64Index) - - if step is not None: - next_ = rng[-1] + step - - if non_empty_indexes: - # Get the stop value from "next" or alternatively - # from the last non-empty index - stop = non_empty_indexes[-1].stop if next_ is None else next_ - return RangeIndex(start, stop, step) - - # Here all "indexes" had 0 length, i.e. were empty. - # In this case return an empty range index. - return RangeIndex(0, 0) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ce7b73a92b18a..b167f76d16445 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -51,6 +51,7 @@ ABCDataFrame, ABCDateOffset, ABCDatetimeArray, + ABCDatetimeIndex, ABCIndexClass, ABCMultiIndex, ABCPandasArray, @@ -4309,14 +4310,25 @@ def _concat(self, to_concat, name): if len(typs) == 1: return self._concat_same_dtype(to_concat, name=name) - return _concat._concat_index_asobject(to_concat, name=name) + return Index._concat_same_dtype(self, to_concat, name=name) def _concat_same_dtype(self, to_concat, name): """ Concatenate to_concat which has the same class. 
""" # must be overridden in specific classes - return _concat._concat_index_asobject(to_concat, name) + klasses = (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex, ExtensionArray) + to_concat = [ + x.astype(object) if isinstance(x, klasses) else x for x in to_concat + ] + + self = to_concat[0] + attribs = self._get_attributes_dict() + attribs["name"] = name + + to_concat = [x._values if isinstance(x, Index) else x for x in to_concat] + + return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs) def putmask(self, mask, value): """ diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 1a1f8ae826ca7..2cdf73788dd9b 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -17,7 +17,6 @@ needs_i8_conversion, pandas_dtype, ) -import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.generic import ( ABCFloat64Index, ABCInt64Index, @@ -129,7 +128,8 @@ def _assert_safe_casting(cls, data, subarr): pass def _concat_same_dtype(self, indexes, name): - return _concat._concat_index_same_dtype(indexes).rename(name) + result = type(indexes[0])(np.concatenate([x._values for x in indexes])) + return result.rename(name) @property def is_all_dates(self): diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 16098c474a473..a026f08a7560d 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -11,7 +11,6 @@ from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, cache_readonly -from pandas.core.dtypes import concat as _concat from pandas.core.dtypes.common import ( ensure_platform_int, ensure_python_int, @@ -647,7 +646,53 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False) return super().join(other, how, level, return_indexers, sort) def _concat_same_dtype(self, indexes, name): - return _concat._concat_rangeindex_same_dtype(indexes).rename(name) + """ + Concatenates multiple RangeIndex 
instances. All members of "indexes" must + be of type RangeIndex; result will be RangeIndex if possible, Int64Index + otherwise. E.g.: + indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6) + indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5]) + """ + start = step = next_ = None + + # Filter the empty indexes + non_empty_indexes = [obj for obj in indexes if len(obj)] + + for obj in non_empty_indexes: + rng = obj._range # type: range + + if start is None: + # This is set by the first non-empty index + start = rng.start + if step is None and len(rng) > 1: + step = rng.step + elif step is None: + # First non-empty index had only one element + if rng.start == start: + result = Int64Index(np.concatenate([x._values for x in indexes])) + return result.rename(name) + + step = rng.start - start + + non_consecutive = (step != rng.step and len(rng) > 1) or ( + next_ is not None and rng.start != next_ + ) + if non_consecutive: + result = Int64Index(np.concatenate([x._values for x in indexes])) + return result.rename(name) + + if step is not None: + next_ = rng[-1] + step + + if non_empty_indexes: + # Get the stop value from "next" or alternatively + # from the last non-empty index + stop = non_empty_indexes[-1].stop if next_ is None else next_ + return RangeIndex(start, stop, step).rename(name) + + # Here all "indexes" had 0 length, i.e. were empty. + # In this case return an empty range index. + return RangeIndex(0, 0).rename(name) def __len__(self): """ diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index e79991f652154..280b0a99c7e68 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -411,7 +411,7 @@ def test_append(self): tm.assert_index_equal(result, expected, exact=True) def test_append_to_another(self): - # hits _concat_index_asobject + # hits Index._concat_same_dtype fst = Index(["a", "b"]) snd = CategoricalIndex(["d", "e"]) result = fst.append(snd)
cc @jorisvandenbossche we briefly discussed at the sprint the idea that `dtype.concat` is a weird place to define these functions. This PR takes the subset of `dtypes.concat` methods that are a) private and b) equivalent to `klass._concat_same_dtype` for some `klass` and moves the implementation to the appropriate class. The categorical one is left in place for now because a) it is public and b) it'd be a pretty big move in and of itself.
https://api.github.com/repos/pandas-dev/pandas/pulls/27727
2019-08-03T02:44:17Z
2019-08-05T11:54:25Z
2019-08-05T11:54:25Z
2019-08-05T14:30:58Z